Compare commits
No commits in common. "5a31c05f71a9e8dc403d66573954c2a2f62b792a" and "86b1d27afb847b4aaeeb691d86bb04f05a52a343" have entirely different histories.
5a31c05f71
...
86b1d27afb
8
Makefile
8
Makefile
@ -1,6 +1,6 @@
|
|||||||
GO ?= go
|
GO ?= go
|
||||||
RM ?= rm
|
RM ?= rm
|
||||||
GOFLAGS ?= -v -ldflags "-w -X `go list`.Version=$(VERSION) -X `go list`.Commit=$(COMMIT) -X `go list`.Build=$(BUILD)" -mod=vendor
|
GOFLAGS ?= -v -ldflags "-w -X `${GO} list`.Version=$(VERSION) -X `${GO} list`.Build=$(BUILD)"
|
||||||
PREFIX ?= /usr/local
|
PREFIX ?= /usr/local
|
||||||
BINDIR ?= bin
|
BINDIR ?= bin
|
||||||
MANDIR ?= share/man
|
MANDIR ?= share/man
|
||||||
@ -8,10 +8,8 @@ MKDIR ?= mkdir
|
|||||||
CP ?= cp
|
CP ?= cp
|
||||||
SYSCONFDIR ?= /etc
|
SYSCONFDIR ?= /etc
|
||||||
|
|
||||||
VERSION = `git describe --abbrev=0 --tags 2>/dev/null || echo "$VERSION"`
|
VERSION = 0.5.0
|
||||||
COMMIT = `git rev-parse --short HEAD || echo "$COMMIT"`
|
BUILD = `date +%Y.%m.%d-%H:%M:%S`
|
||||||
BRANCH = `git rev-parse --abbrev-ref HEAD`
|
|
||||||
BUILD = `git show -s --pretty=format:%cI`
|
|
||||||
|
|
||||||
GOARCH ?= amd64
|
GOARCH ?= amd64
|
||||||
GOOS ?= linux
|
GOOS ?= linux
|
||||||
|
21
vendor/git.sr.ht/~emersion/go-scfg/LICENSE
vendored
21
vendor/git.sr.ht/~emersion/go-scfg/LICENSE
vendored
@ -1,21 +0,0 @@
|
|||||||
The MIT License (MIT)
|
|
||||||
|
|
||||||
Copyright (c) 2020 Simon Ser
|
|
||||||
|
|
||||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
||||||
of this software and associated documentation files (the "Software"), to deal
|
|
||||||
in the Software without restriction, including without limitation the rights
|
|
||||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
||||||
copies of the Software, and to permit persons to whom the Software is
|
|
||||||
furnished to do so, subject to the following conditions:
|
|
||||||
|
|
||||||
The above copyright notice and this permission notice shall be included in all
|
|
||||||
copies or substantial portions of the Software.
|
|
||||||
|
|
||||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
||||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
||||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
||||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
||||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
||||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
||||||
SOFTWARE.
|
|
16
vendor/git.sr.ht/~emersion/go-scfg/README.md
vendored
16
vendor/git.sr.ht/~emersion/go-scfg/README.md
vendored
@ -1,16 +0,0 @@
|
|||||||
# go-scfg
|
|
||||||
|
|
||||||
[](https://godocs.io/git.sr.ht/~emersion/go-scfg)
|
|
||||||
|
|
||||||
Go library for [scfg].
|
|
||||||
|
|
||||||
## Contributing
|
|
||||||
|
|
||||||
Send patches on the [mailing list].
|
|
||||||
|
|
||||||
## License
|
|
||||||
|
|
||||||
MIT
|
|
||||||
|
|
||||||
[scfg]: https://git.sr.ht/~emersion/scfg
|
|
||||||
[mailing list]: https://lists.sr.ht/~emersion/public-inbox
|
|
132
vendor/git.sr.ht/~emersion/go-scfg/reader.go
vendored
132
vendor/git.sr.ht/~emersion/go-scfg/reader.go
vendored
@ -1,132 +0,0 @@
|
|||||||
// Package scfg parses configuration files.
|
|
||||||
package scfg
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bufio"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"os"
|
|
||||||
|
|
||||||
"github.com/google/shlex"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Block is a list of directives.
|
|
||||||
type Block []*Directive
|
|
||||||
|
|
||||||
// GetAll returns a list of directives with the provided name.
|
|
||||||
func (blk Block) GetAll(name string) []*Directive {
|
|
||||||
l := make([]*Directive, 0, len(blk))
|
|
||||||
for _, child := range blk {
|
|
||||||
if child.Name == name {
|
|
||||||
l = append(l, child)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return l
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get returns the first directive with the provided name.
|
|
||||||
func (blk Block) Get(name string) *Directive {
|
|
||||||
for _, child := range blk {
|
|
||||||
if child.Name == name {
|
|
||||||
return child
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Directive is a configuration directive.
|
|
||||||
type Directive struct {
|
|
||||||
Name string
|
|
||||||
Params []string
|
|
||||||
|
|
||||||
Children Block
|
|
||||||
}
|
|
||||||
|
|
||||||
// ParseParams extracts parameters from the directive. It errors out if the
|
|
||||||
// user hasn't provided enough parameters.
|
|
||||||
func (d *Directive) ParseParams(params ...*string) error {
|
|
||||||
if len(d.Params) < len(params) {
|
|
||||||
return fmt.Errorf("directive %q: want %v params, got %v", d.Name, len(params), len(d.Params))
|
|
||||||
}
|
|
||||||
for i, ptr := range params {
|
|
||||||
if ptr == nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
*ptr = d.Params[i]
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Load loads a configuration file.
|
|
||||||
func Load(path string) (Block, error) {
|
|
||||||
f, err := os.Open(path)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
defer f.Close()
|
|
||||||
|
|
||||||
return Read(f)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Read parses a configuration file from an io.Reader.
|
|
||||||
func Read(r io.Reader) (Block, error) {
|
|
||||||
scanner := bufio.NewScanner(r)
|
|
||||||
|
|
||||||
block, closingBrace, err := readBlock(scanner)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
} else if closingBrace {
|
|
||||||
return nil, fmt.Errorf("unexpected '}'")
|
|
||||||
}
|
|
||||||
|
|
||||||
return block, scanner.Err()
|
|
||||||
}
|
|
||||||
|
|
||||||
// readBlock reads a block. closingBrace is true if parsing stopped on '}'
|
|
||||||
// (otherwise, it stopped on Scanner.Scan).
|
|
||||||
func readBlock(scanner *bufio.Scanner) (block Block, closingBrace bool, err error) {
|
|
||||||
for scanner.Scan() {
|
|
||||||
l := scanner.Text()
|
|
||||||
words, err := shlex.Split(l)
|
|
||||||
if err != nil {
|
|
||||||
return nil, false, fmt.Errorf("failed to parse configuration file: %v", err)
|
|
||||||
} else if len(words) == 0 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(words) == 1 && l[len(l)-1] == '}' {
|
|
||||||
closingBrace = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
var d *Directive
|
|
||||||
if words[len(words)-1] == "{" && l[len(l)-1] == '{' {
|
|
||||||
words = words[:len(words)-1]
|
|
||||||
|
|
||||||
var name string
|
|
||||||
params := words
|
|
||||||
if len(words) > 0 {
|
|
||||||
name, params = words[0], words[1:]
|
|
||||||
}
|
|
||||||
|
|
||||||
childBlock, childClosingBrace, err := readBlock(scanner)
|
|
||||||
if err != nil {
|
|
||||||
return nil, false, err
|
|
||||||
} else if !childClosingBrace {
|
|
||||||
return nil, false, io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
|
|
||||||
// Allows callers to tell apart "no block" and "empty block"
|
|
||||||
if childBlock == nil {
|
|
||||||
childBlock = Block{}
|
|
||||||
}
|
|
||||||
|
|
||||||
d = &Directive{Name: name, Params: params, Children: childBlock}
|
|
||||||
} else {
|
|
||||||
d = &Directive{Name: words[0], Params: words[1:]}
|
|
||||||
}
|
|
||||||
block = append(block, d)
|
|
||||||
}
|
|
||||||
|
|
||||||
return block, closingBrace, nil
|
|
||||||
}
|
|
12
vendor/git.sr.ht/~sircmpwn/go-bare/.build.yml
vendored
12
vendor/git.sr.ht/~sircmpwn/go-bare/.build.yml
vendored
@ -1,12 +0,0 @@
|
|||||||
image: alpine/edge
|
|
||||||
packages:
|
|
||||||
- go
|
|
||||||
sources:
|
|
||||||
- https://git.sr.ht/~sircmpwn/go-bare
|
|
||||||
tasks:
|
|
||||||
- gen: |
|
|
||||||
cd go-bare
|
|
||||||
go generate ./...
|
|
||||||
- test: |
|
|
||||||
cd go-bare
|
|
||||||
go test ./...
|
|
@ -1,3 +0,0 @@
|
|||||||
*.test
|
|
||||||
*.prof
|
|
||||||
*.log
|
|
13
vendor/git.sr.ht/~sircmpwn/go-bare/LICENSE
vendored
13
vendor/git.sr.ht/~sircmpwn/go-bare/LICENSE
vendored
@ -1,13 +0,0 @@
|
|||||||
Copyright 2020 Drew DeVault
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
130
vendor/git.sr.ht/~sircmpwn/go-bare/README.md
vendored
130
vendor/git.sr.ht/~sircmpwn/go-bare/README.md
vendored
@ -1,130 +0,0 @@
|
|||||||
# go-bare [](https://godocs.io/git.sr.ht/~sircmpwn/go-bare) [](https://builds.sr.ht/~sircmpwn/go-bare?)
|
|
||||||
|
|
||||||
An implementation of the [BARE](https://baremessages.org) message format
|
|
||||||
for Go.
|
|
||||||
|
|
||||||
**Status**
|
|
||||||
|
|
||||||
This mostly works, but you may run into some edge cases with union types.
|
|
||||||
|
|
||||||
## Code generation
|
|
||||||
|
|
||||||
An example is provided in the `examples` directory. Here is a basic
|
|
||||||
introduction:
|
|
||||||
|
|
||||||
```
|
|
||||||
$ cat schema.bare
|
|
||||||
type Address {
|
|
||||||
address: [4]string
|
|
||||||
city: string
|
|
||||||
state: string
|
|
||||||
country: string
|
|
||||||
}
|
|
||||||
$ go run git.sr.ht/~sircmpwn/go-bare/cmd/gen -p models schema.bare models/gen.go
|
|
||||||
```
|
|
||||||
|
|
||||||
Then you can write something like the following:
|
|
||||||
|
|
||||||
```go
|
|
||||||
import "models"
|
|
||||||
|
|
||||||
/* ... */
|
|
||||||
|
|
||||||
bytes := []byte{ /* ... */ }
|
|
||||||
var addr Address
|
|
||||||
err := addr.Decode(bytes)
|
|
||||||
```
|
|
||||||
|
|
||||||
You can also add custom types and skip generating them by passing the `-s
|
|
||||||
TypeName` flag to gen, then providing your own implementation. For example, to
|
|
||||||
rig up time.Time with a custom "Time" BARE type, add this to your BARE schema:
|
|
||||||
|
|
||||||
```
|
|
||||||
type Time string
|
|
||||||
```
|
|
||||||
|
|
||||||
Then pass `-s Time` to gen, and provide your own implementation of Time in the
|
|
||||||
same package. See `examples/time.go` for an example of such an implementation.
|
|
||||||
|
|
||||||
## Marshal usage
|
|
||||||
|
|
||||||
For many use-cases, it may be more convenient to write your types manually and
|
|
||||||
use Marshal and Unmarshal directly. If you choose this approach, you may also
|
|
||||||
use `git.sr.ht/~sircmpwn/go-bare/schema.SchemaFor` to generate a BARE schema
|
|
||||||
language document describing your structs.
|
|
||||||
|
|
||||||
```go
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"git.sr.ht/~sircmpwn/go-bare"
|
|
||||||
)
|
|
||||||
|
|
||||||
// type Coordinates {
|
|
||||||
// x: int
|
|
||||||
// y: int
|
|
||||||
// z: int
|
|
||||||
// q: optional<int>
|
|
||||||
// }
|
|
||||||
type Coordinates struct {
|
|
||||||
X uint
|
|
||||||
Y uint
|
|
||||||
Z uint
|
|
||||||
Q *uint
|
|
||||||
}
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
var coords Coordinates
|
|
||||||
payload := []byte{0x01, 0x02, 0x03, 0x01, 0x04}
|
|
||||||
err := bare.Unmarshal(payload, &coords)
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
fmt.Printf("coords: %d, %d, %d (%d)\n",
|
|
||||||
coords.X, coords.Y, coords.Z, *coords.Q) /* coords: 1, 2, 3 (4) */
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
### Unions
|
|
||||||
|
|
||||||
To use union types, you need to define an interface to represent the union of
|
|
||||||
possible values, and this interface needs to implement `bare.Union`:
|
|
||||||
|
|
||||||
```go
|
|
||||||
type Person interface {
|
|
||||||
Union
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
Then, for each possible union type, implement the interface:
|
|
||||||
|
|
||||||
```go
|
|
||||||
type Employee struct { /* ... */ }
|
|
||||||
func (e Employee) IsUnion() {}
|
|
||||||
|
|
||||||
type Customer struct { /* ... */ }
|
|
||||||
func (c Customer) IsUnion() {}
|
|
||||||
```
|
|
||||||
|
|
||||||
The IsUnion function is necessary to make the type compatible with the Union
|
|
||||||
interface. Then, to marshal and unmarshal using this union type, you need to
|
|
||||||
tell go-bare about your union:
|
|
||||||
|
|
||||||
```go
|
|
||||||
func init() {
|
|
||||||
// The first argument is a pointer of the union interface, and the
|
|
||||||
// subsequent arguments are values of each possible subtype, in ascending
|
|
||||||
// order of union tag:
|
|
||||||
bare.RegisterUnion((*Person)(nil)).
|
|
||||||
Member(*new(Employee), 0).
|
|
||||||
Member(*new(Customer), 1)
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
This is all done for you if you use code generation.
|
|
||||||
|
|
||||||
## Contributing, getting help
|
|
||||||
|
|
||||||
Send patches and questions to
|
|
||||||
[~sircmpwn/public-inbox@lists.sr.ht](mailto:~sircmpwn/public-inbox@lists.sr.ht)
|
|
17
vendor/git.sr.ht/~sircmpwn/go-bare/errors.go
vendored
17
vendor/git.sr.ht/~sircmpwn/go-bare/errors.go
vendored
@ -1,17 +0,0 @@
|
|||||||
package bare
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"reflect"
|
|
||||||
)
|
|
||||||
|
|
||||||
var ErrInvalidStr = errors.New("String contains invalid UTF-8 sequences")
|
|
||||||
|
|
||||||
type UnsupportedTypeError struct {
|
|
||||||
Type reflect.Type
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *UnsupportedTypeError) Error() string {
|
|
||||||
return fmt.Sprintf("Unsupported type for marshaling: %s\n", e.Type.String())
|
|
||||||
}
|
|
55
vendor/git.sr.ht/~sircmpwn/go-bare/limit.go
vendored
55
vendor/git.sr.ht/~sircmpwn/go-bare/limit.go
vendored
@ -1,55 +0,0 @@
|
|||||||
package bare
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"io"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
maxUnmarshalBytes uint64 = 1024 * 1024 * 32 /* 32 MiB */
|
|
||||||
maxArrayLength uint64 = 1024 * 4 /* 4096 elements */
|
|
||||||
maxMapSize uint64 = 1024
|
|
||||||
)
|
|
||||||
|
|
||||||
// MaxUnmarshalBytes sets the maximum size of a message decoded by unmarshal.
|
|
||||||
// By default, this is set to 32 MiB.
|
|
||||||
func MaxUnmarshalBytes(bytes uint64) {
|
|
||||||
maxUnmarshalBytes = bytes
|
|
||||||
}
|
|
||||||
|
|
||||||
// MaxArrayLength sets maximum number of elements in array. Defaults to 4096 elements
|
|
||||||
func MaxArrayLength(length uint64) {
|
|
||||||
maxArrayLength = length
|
|
||||||
}
|
|
||||||
|
|
||||||
// MaxMapSize sets maximum size of map. Defaults to 1024 key/value pairs
|
|
||||||
func MaxMapSize(size uint64) {
|
|
||||||
maxMapSize = size
|
|
||||||
}
|
|
||||||
|
|
||||||
// Use MaxUnmarshalBytes to prevent this error from occuring on messages which
|
|
||||||
// are large by design.
|
|
||||||
var ErrLimitExceeded = errors.New("Maximum message size exceeded")
|
|
||||||
|
|
||||||
// Identical to io.LimitedReader, except it returns our custom error instead of
|
|
||||||
// EOF if the limit is reached.
|
|
||||||
type limitedReader struct {
|
|
||||||
R io.Reader
|
|
||||||
N uint64
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l *limitedReader) Read(p []byte) (n int, err error) {
|
|
||||||
if l.N <= 0 {
|
|
||||||
return 0, ErrLimitExceeded
|
|
||||||
}
|
|
||||||
if uint64(len(p)) > l.N {
|
|
||||||
p = p[0:l.N]
|
|
||||||
}
|
|
||||||
n, err = l.R.Read(p)
|
|
||||||
l.N -= uint64(n)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func newLimitedReader(r io.Reader) *limitedReader {
|
|
||||||
return &limitedReader{r, maxUnmarshalBytes}
|
|
||||||
}
|
|
308
vendor/git.sr.ht/~sircmpwn/go-bare/marshal.go
vendored
308
vendor/git.sr.ht/~sircmpwn/go-bare/marshal.go
vendored
@ -1,308 +0,0 @@
|
|||||||
package bare
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"reflect"
|
|
||||||
"sync"
|
|
||||||
)
|
|
||||||
|
|
||||||
// A type which implements this interface will be responsible for marshaling
|
|
||||||
// itself when encountered.
|
|
||||||
type Marshalable interface {
|
|
||||||
Marshal(w *Writer) error
|
|
||||||
}
|
|
||||||
|
|
||||||
var encoderBufferPool = sync.Pool{
|
|
||||||
New: func() interface{} {
|
|
||||||
buf := &bytes.Buffer{}
|
|
||||||
buf.Grow(32)
|
|
||||||
return buf
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
// Marshals a value (val, which must be a pointer) into a BARE message.
|
|
||||||
//
|
|
||||||
// The encoding of each struct field can be customized by the format string
|
|
||||||
// stored under the "bare" key in the struct field's tag.
|
|
||||||
//
|
|
||||||
// As a special case, if the field tag is "-", the field is always omitted.
|
|
||||||
func Marshal(val interface{}) ([]byte, error) {
|
|
||||||
// reuse buffers from previous serializations
|
|
||||||
b := encoderBufferPool.Get().(*bytes.Buffer)
|
|
||||||
defer func() {
|
|
||||||
b.Reset()
|
|
||||||
encoderBufferPool.Put(b)
|
|
||||||
}()
|
|
||||||
|
|
||||||
w := NewWriter(b)
|
|
||||||
err := MarshalWriter(w, val)
|
|
||||||
|
|
||||||
msg := make([]byte, b.Len())
|
|
||||||
copy(msg, b.Bytes())
|
|
||||||
|
|
||||||
return msg, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Marshals a value (val, which must be a pointer) into a BARE message and
|
|
||||||
// writes it to a Writer. See Marshal for details.
|
|
||||||
func MarshalWriter(w *Writer, val interface{}) error {
|
|
||||||
t := reflect.TypeOf(val)
|
|
||||||
v := reflect.ValueOf(val)
|
|
||||||
if t.Kind() != reflect.Ptr {
|
|
||||||
return errors.New("Expected val to be pointer type")
|
|
||||||
}
|
|
||||||
|
|
||||||
return getEncoder(t.Elem())(w, v.Elem())
|
|
||||||
}
|
|
||||||
|
|
||||||
type encodeFunc func(w *Writer, v reflect.Value) error
|
|
||||||
|
|
||||||
var encodeFuncCache sync.Map // map[reflect.Type]encodeFunc
|
|
||||||
|
|
||||||
// get decoder from cache
|
|
||||||
func getEncoder(t reflect.Type) encodeFunc {
|
|
||||||
if f, ok := encodeFuncCache.Load(t); ok {
|
|
||||||
return f.(encodeFunc)
|
|
||||||
}
|
|
||||||
|
|
||||||
f := encoderFunc(t)
|
|
||||||
encodeFuncCache.Store(t, f)
|
|
||||||
return f
|
|
||||||
}
|
|
||||||
|
|
||||||
var marshalableInterface = reflect.TypeOf((*Unmarshalable)(nil)).Elem()
|
|
||||||
|
|
||||||
func encoderFunc(t reflect.Type) encodeFunc {
|
|
||||||
if reflect.PtrTo(t).Implements(marshalableInterface) {
|
|
||||||
return func(w *Writer, v reflect.Value) error {
|
|
||||||
uv := v.Addr().Interface().(Marshalable)
|
|
||||||
return uv.Marshal(w)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if t.Kind() == reflect.Interface && t.Implements(unionInterface) {
|
|
||||||
return encodeUnion(t)
|
|
||||||
}
|
|
||||||
|
|
||||||
switch t.Kind() {
|
|
||||||
case reflect.Ptr:
|
|
||||||
return encodeOptional(t.Elem())
|
|
||||||
case reflect.Struct:
|
|
||||||
return encodeStruct(t)
|
|
||||||
case reflect.Array:
|
|
||||||
return encodeArray(t)
|
|
||||||
case reflect.Slice:
|
|
||||||
return encodeSlice(t)
|
|
||||||
case reflect.Map:
|
|
||||||
return encodeMap(t)
|
|
||||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
|
|
||||||
return encodeUint
|
|
||||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
|
||||||
return encodeInt
|
|
||||||
case reflect.Float32, reflect.Float64:
|
|
||||||
return encodeFloat
|
|
||||||
case reflect.Bool:
|
|
||||||
return encodeBool
|
|
||||||
case reflect.String:
|
|
||||||
return encodeString
|
|
||||||
}
|
|
||||||
|
|
||||||
return func(w *Writer, v reflect.Value) error {
|
|
||||||
return &UnsupportedTypeError{v.Type()}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func encodeOptional(t reflect.Type) encodeFunc {
|
|
||||||
return func(w *Writer, v reflect.Value) error {
|
|
||||||
if v.IsNil() {
|
|
||||||
return w.WriteBool(false)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := w.WriteBool(true); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return getEncoder(t)(w, v.Elem())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func encodeStruct(t reflect.Type) encodeFunc {
|
|
||||||
n := t.NumField()
|
|
||||||
encoders := make([]encodeFunc, n)
|
|
||||||
for i := 0; i < n; i++ {
|
|
||||||
field := t.Field(i)
|
|
||||||
if field.Tag.Get("bare") == "-" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
encoders[i] = getEncoder(field.Type)
|
|
||||||
}
|
|
||||||
|
|
||||||
return func(w *Writer, v reflect.Value) error {
|
|
||||||
for i := 0; i < n; i++ {
|
|
||||||
if encoders[i] == nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
err := encoders[i](w, v.Field(i))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func encodeArray(t reflect.Type) encodeFunc {
|
|
||||||
f := getEncoder(t.Elem())
|
|
||||||
len := t.Len()
|
|
||||||
|
|
||||||
return func(w *Writer, v reflect.Value) error {
|
|
||||||
for i := 0; i < len; i++ {
|
|
||||||
if err := f(w, v.Index(i)); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func encodeSlice(t reflect.Type) encodeFunc {
|
|
||||||
elem := t.Elem()
|
|
||||||
f := getEncoder(elem)
|
|
||||||
|
|
||||||
return func(w *Writer, v reflect.Value) error {
|
|
||||||
if err := w.WriteUint(uint64(v.Len())); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
for i := 0; i < v.Len(); i++ {
|
|
||||||
if err := f(w, v.Index(i)); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func encodeMap(t reflect.Type) encodeFunc {
|
|
||||||
keyType := t.Key()
|
|
||||||
keyf := getEncoder(keyType)
|
|
||||||
|
|
||||||
valueType := t.Elem()
|
|
||||||
valf := getEncoder(valueType)
|
|
||||||
|
|
||||||
return func(w *Writer, v reflect.Value) error {
|
|
||||||
if err := w.WriteUint(uint64(v.Len())); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
iter := v.MapRange()
|
|
||||||
for iter.Next() {
|
|
||||||
if err := keyf(w, iter.Key()); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := valf(w, iter.Value()); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func encodeUnion(t reflect.Type) encodeFunc {
|
|
||||||
ut, ok := unionRegistry[t]
|
|
||||||
if !ok {
|
|
||||||
return func(w *Writer, v reflect.Value) error {
|
|
||||||
return fmt.Errorf("Union type %s is not registered", t.Name())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
encoders := make(map[uint64]encodeFunc)
|
|
||||||
for tag, t := range ut.types {
|
|
||||||
encoders[tag] = getEncoder(t)
|
|
||||||
}
|
|
||||||
|
|
||||||
return func(w *Writer, v reflect.Value) error {
|
|
||||||
t := v.Elem().Type()
|
|
||||||
if t.Kind() == reflect.Ptr {
|
|
||||||
// If T is a valid union value type, *T is valid too.
|
|
||||||
t = t.Elem()
|
|
||||||
v = v.Elem()
|
|
||||||
}
|
|
||||||
tag, ok := ut.tags[t]
|
|
||||||
if !ok {
|
|
||||||
return fmt.Errorf("Invalid union value: %s", v.Elem().String())
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := w.WriteUint(tag); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return encoders[tag](w, v.Elem())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func encodeUint(w *Writer, v reflect.Value) error {
|
|
||||||
switch getIntKind(v.Type()) {
|
|
||||||
case reflect.Uint:
|
|
||||||
return w.WriteUint(v.Uint())
|
|
||||||
|
|
||||||
case reflect.Uint8:
|
|
||||||
return w.WriteU8(uint8(v.Uint()))
|
|
||||||
|
|
||||||
case reflect.Uint16:
|
|
||||||
return w.WriteU16(uint16(v.Uint()))
|
|
||||||
|
|
||||||
case reflect.Uint32:
|
|
||||||
return w.WriteU32(uint32(v.Uint()))
|
|
||||||
|
|
||||||
case reflect.Uint64:
|
|
||||||
return w.WriteU64(uint64(v.Uint()))
|
|
||||||
}
|
|
||||||
|
|
||||||
panic("not uint")
|
|
||||||
}
|
|
||||||
|
|
||||||
func encodeInt(w *Writer, v reflect.Value) error {
|
|
||||||
switch getIntKind(v.Type()) {
|
|
||||||
case reflect.Int:
|
|
||||||
return w.WriteInt(v.Int())
|
|
||||||
|
|
||||||
case reflect.Int8:
|
|
||||||
return w.WriteI8(int8(v.Int()))
|
|
||||||
|
|
||||||
case reflect.Int16:
|
|
||||||
return w.WriteI16(int16(v.Int()))
|
|
||||||
|
|
||||||
case reflect.Int32:
|
|
||||||
return w.WriteI32(int32(v.Int()))
|
|
||||||
|
|
||||||
case reflect.Int64:
|
|
||||||
return w.WriteI64(int64(v.Int()))
|
|
||||||
}
|
|
||||||
|
|
||||||
panic("not int")
|
|
||||||
}
|
|
||||||
|
|
||||||
func encodeFloat(w *Writer, v reflect.Value) error {
|
|
||||||
switch v.Type().Kind() {
|
|
||||||
case reflect.Float32:
|
|
||||||
return w.WriteF32(float32(v.Float()))
|
|
||||||
case reflect.Float64:
|
|
||||||
return w.WriteF64(v.Float())
|
|
||||||
}
|
|
||||||
|
|
||||||
panic("not float")
|
|
||||||
}
|
|
||||||
|
|
||||||
func encodeBool(w *Writer, v reflect.Value) error {
|
|
||||||
return w.WriteBool(v.Bool())
|
|
||||||
}
|
|
||||||
|
|
||||||
func encodeString(w *Writer, v reflect.Value) error {
|
|
||||||
if v.Kind() != reflect.String {
|
|
||||||
panic("not string")
|
|
||||||
}
|
|
||||||
return w.WriteString(v.String())
|
|
||||||
}
|
|
@ -1,8 +0,0 @@
|
|||||||
// An implementation of the BARE message format for Go.
|
|
||||||
//
|
|
||||||
// https://git.sr.ht/~sircmpwn/bare
|
|
||||||
//
|
|
||||||
// See the git repository for usage examples:
|
|
||||||
//
|
|
||||||
// https://git.sr.ht/~sircmpwn/go-bare
|
|
||||||
package bare
|
|
185
vendor/git.sr.ht/~sircmpwn/go-bare/reader.go
vendored
185
vendor/git.sr.ht/~sircmpwn/go-bare/reader.go
vendored
@ -1,185 +0,0 @@
|
|||||||
package bare
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/binary"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"math"
|
|
||||||
"unicode/utf8"
|
|
||||||
)
|
|
||||||
|
|
||||||
type byteReader interface {
|
|
||||||
io.Reader
|
|
||||||
io.ByteReader
|
|
||||||
}
|
|
||||||
|
|
||||||
// A Reader for BARE primitive types.
|
|
||||||
type Reader struct {
|
|
||||||
base byteReader
|
|
||||||
scratch [8]byte
|
|
||||||
}
|
|
||||||
|
|
||||||
type simpleByteReader struct {
|
|
||||||
io.Reader
|
|
||||||
scratch [1]byte
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r simpleByteReader) ReadByte() (byte, error) {
|
|
||||||
// using reference type here saves us allocations
|
|
||||||
_, err := r.Read(r.scratch[:])
|
|
||||||
return r.scratch[0], err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Returns a new BARE primitive reader wrapping the given io.Reader.
|
|
||||||
func NewReader(base io.Reader) *Reader {
|
|
||||||
br, ok := base.(byteReader)
|
|
||||||
if !ok {
|
|
||||||
br = simpleByteReader{Reader: base}
|
|
||||||
}
|
|
||||||
return &Reader{base: br}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *Reader) ReadUint() (uint64, error) {
|
|
||||||
x, err := binary.ReadUvarint(r.base)
|
|
||||||
if err != nil {
|
|
||||||
return x, err
|
|
||||||
}
|
|
||||||
return x, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *Reader) ReadU8() (uint8, error) {
|
|
||||||
return r.base.ReadByte()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *Reader) ReadU16() (uint16, error) {
|
|
||||||
var i uint16
|
|
||||||
if _, err := io.ReadAtLeast(r.base, r.scratch[:2], 2); err != nil {
|
|
||||||
return i, err
|
|
||||||
}
|
|
||||||
return binary.LittleEndian.Uint16(r.scratch[:]), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *Reader) ReadU32() (uint32, error) {
|
|
||||||
var i uint32
|
|
||||||
if _, err := io.ReadAtLeast(r.base, r.scratch[:4], 4); err != nil {
|
|
||||||
return i, err
|
|
||||||
}
|
|
||||||
return binary.LittleEndian.Uint32(r.scratch[:]), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *Reader) ReadU64() (uint64, error) {
|
|
||||||
var i uint64
|
|
||||||
if _, err := io.ReadAtLeast(r.base, r.scratch[:8], 8); err != nil {
|
|
||||||
return i, err
|
|
||||||
}
|
|
||||||
return binary.LittleEndian.Uint64(r.scratch[:]), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *Reader) ReadInt() (int64, error) {
|
|
||||||
return binary.ReadVarint(r.base)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *Reader) ReadI8() (int8, error) {
|
|
||||||
b, err := r.base.ReadByte()
|
|
||||||
return int8(b), err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *Reader) ReadI16() (int16, error) {
|
|
||||||
var i int16
|
|
||||||
if _, err := io.ReadAtLeast(r.base, r.scratch[:2], 2); err != nil {
|
|
||||||
return i, err
|
|
||||||
}
|
|
||||||
return int16(binary.LittleEndian.Uint16(r.scratch[:])), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *Reader) ReadI32() (int32, error) {
|
|
||||||
var i int32
|
|
||||||
if _, err := io.ReadAtLeast(r.base, r.scratch[:4], 4); err != nil {
|
|
||||||
return i, err
|
|
||||||
}
|
|
||||||
return int32(binary.LittleEndian.Uint32(r.scratch[:])), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *Reader) ReadI64() (int64, error) {
|
|
||||||
var i int64
|
|
||||||
if _, err := io.ReadAtLeast(r.base, r.scratch[:], 8); err != nil {
|
|
||||||
return i, err
|
|
||||||
}
|
|
||||||
return int64(binary.LittleEndian.Uint64(r.scratch[:])), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *Reader) ReadF32() (float32, error) {
|
|
||||||
u, err := r.ReadU32()
|
|
||||||
f := math.Float32frombits(u)
|
|
||||||
if math.IsNaN(float64(f)) {
|
|
||||||
return 0.0, fmt.Errorf("NaN is not permitted in BARE floats")
|
|
||||||
}
|
|
||||||
return f, err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *Reader) ReadF64() (float64, error) {
|
|
||||||
u, err := r.ReadU64()
|
|
||||||
f := math.Float64frombits(u)
|
|
||||||
if math.IsNaN(f) {
|
|
||||||
return 0.0, fmt.Errorf("NaN is not permitted in BARE floats")
|
|
||||||
}
|
|
||||||
return f, err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *Reader) ReadBool() (bool, error) {
|
|
||||||
b, err := r.ReadU8()
|
|
||||||
if err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if b > 1 {
|
|
||||||
return false, fmt.Errorf("Invalid bool value: %#x", b)
|
|
||||||
}
|
|
||||||
|
|
||||||
return b == 1, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *Reader) ReadString() (string, error) {
|
|
||||||
buf, err := r.ReadData()
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
if !utf8.Valid(buf) {
|
|
||||||
return "", ErrInvalidStr
|
|
||||||
}
|
|
||||||
return string(buf), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Reads a fixed amount of arbitrary data, defined by the length of the slice.
|
|
||||||
func (r *Reader) ReadDataFixed(dest []byte) error {
|
|
||||||
var amt int = 0
|
|
||||||
for amt < len(dest) {
|
|
||||||
n, err := r.base.Read(dest[amt:])
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
amt += n
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Reads arbitrary data whose length is read from the message.
|
|
||||||
func (r *Reader) ReadData() ([]byte, error) {
|
|
||||||
l, err := r.ReadUint()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if l >= maxUnmarshalBytes {
|
|
||||||
return nil, ErrLimitExceeded
|
|
||||||
}
|
|
||||||
buf := make([]byte, l)
|
|
||||||
var amt uint64 = 0
|
|
||||||
for amt < l {
|
|
||||||
n, err := r.base.Read(buf[amt:])
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
amt += uint64(n)
|
|
||||||
}
|
|
||||||
return buf, nil
|
|
||||||
}
|
|
76
vendor/git.sr.ht/~sircmpwn/go-bare/unions.go
vendored
76
vendor/git.sr.ht/~sircmpwn/go-bare/unions.go
vendored
@ -1,76 +0,0 @@
|
|||||||
package bare
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"reflect"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Any type which is a union member must implement this interface. You must
// also call RegisterUnion for go-bare to marshal or unmarshal messages which
// utilize your union type.
type Union interface {
	// IsUnion is a marker method; it carries no behavior.
	IsUnion()
}

// UnionTags records the bidirectional mapping between the concrete member
// types of one union interface and their BARE tag values.
type UnionTags struct {
	iface reflect.Type            // the union interface type itself
	tags  map[reflect.Type]uint64 // member type -> BARE tag
	types map[uint64]reflect.Type // BARE tag -> member type
}
|
|
||||||
|
|
||||||
var unionInterface = reflect.TypeOf((*Union)(nil)).Elem()
|
|
||||||
var unionRegistry map[reflect.Type]*UnionTags
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
unionRegistry = make(map[reflect.Type]*UnionTags)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Registers a union type in this context. Pass the union interface and the
|
|
||||||
// list of types associated with it, sorted ascending by their union tag.
|
|
||||||
func RegisterUnion(iface interface{}) *UnionTags {
|
|
||||||
ity := reflect.TypeOf(iface).Elem()
|
|
||||||
if _, ok := unionRegistry[ity]; ok {
|
|
||||||
panic(fmt.Errorf("Type %s has already been registered", ity.Name()))
|
|
||||||
}
|
|
||||||
|
|
||||||
if !ity.Implements(reflect.TypeOf((*Union)(nil)).Elem()) {
|
|
||||||
panic(fmt.Errorf("Type %s does not implement bare.Union", ity.Name()))
|
|
||||||
}
|
|
||||||
|
|
||||||
utypes := &UnionTags{
|
|
||||||
iface: ity,
|
|
||||||
tags: make(map[reflect.Type]uint64),
|
|
||||||
types: make(map[uint64]reflect.Type),
|
|
||||||
}
|
|
||||||
unionRegistry[ity] = utypes
|
|
||||||
return utypes
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ut *UnionTags) Member(t interface{}, tag uint64) *UnionTags {
|
|
||||||
ty := reflect.TypeOf(t)
|
|
||||||
if !ty.AssignableTo(ut.iface) {
|
|
||||||
panic(fmt.Errorf("Type %s does not implement interface %s",
|
|
||||||
ty.Name(), ut.iface.Name()))
|
|
||||||
}
|
|
||||||
if _, ok := ut.tags[ty]; ok {
|
|
||||||
panic(fmt.Errorf("Type %s is already registered for union %s",
|
|
||||||
ty.Name(), ut.iface.Name()))
|
|
||||||
}
|
|
||||||
if _, ok := ut.types[tag]; ok {
|
|
||||||
panic(fmt.Errorf("Tag %d is already registered for union %s",
|
|
||||||
tag, ut.iface.Name()))
|
|
||||||
}
|
|
||||||
ut.tags[ty] = tag
|
|
||||||
ut.types[tag] = ty
|
|
||||||
return ut
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ut *UnionTags) TagFor(v interface{}) (uint64, bool) {
|
|
||||||
tag, ok := ut.tags[reflect.TypeOf(v)]
|
|
||||||
return tag, ok
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ut *UnionTags) TypeFor(tag uint64) (reflect.Type, bool) {
|
|
||||||
t, ok := ut.types[tag]
|
|
||||||
return t, ok
|
|
||||||
}
|
|
359
vendor/git.sr.ht/~sircmpwn/go-bare/unmarshal.go
vendored
359
vendor/git.sr.ht/~sircmpwn/go-bare/unmarshal.go
vendored
@ -1,359 +0,0 @@
|
|||||||
package bare
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"reflect"
|
|
||||||
"sync"
|
|
||||||
)
|
|
||||||
|
|
||||||
// A type which implements this interface will be responsible for unmarshaling
// itself when encountered.
type Unmarshalable interface {
	// Unmarshal decodes the receiver's representation from r.
	Unmarshal(r *Reader) error
}
|
|
||||||
|
|
||||||
// Unmarshals a BARE message into val, which must be a pointer to a value of
|
|
||||||
// the message type.
|
|
||||||
func Unmarshal(data []byte, val interface{}) error {
|
|
||||||
b := bytes.NewReader(data)
|
|
||||||
r := NewReader(b)
|
|
||||||
return UnmarshalBareReader(r, val)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Unmarshals a BARE message into value (val, which must be a pointer), from a
// reader. See Unmarshal for details.
func UnmarshalReader(r io.Reader, val interface{}) error {
	// Wrap the stream in the package's limited reader so an untrusted
	// source cannot make us consume input without bound.
	r = newLimitedReader(r)
	return UnmarshalBareReader(NewReader(r), val)
}
|
|
||||||
|
|
||||||
// decodeFunc decodes one value of a fixed type from r into v.
type decodeFunc func(r *Reader, v reflect.Value) error

// decodeFuncCache memoizes the decoder built for each reflect.Type.
var decodeFuncCache sync.Map // map[reflect.Type]decodeFunc

// UnmarshalBareReader unmarshals one BARE message from r into val, which must
// be a non-nil pointer to the destination value.
func UnmarshalBareReader(r *Reader, val interface{}) error {
	t := reflect.TypeOf(val)
	v := reflect.ValueOf(val)
	if t.Kind() != reflect.Ptr {
		return errors.New("Expected val to be pointer type")
	}

	// Decode through the pointer into the pointed-to value.
	return getDecoder(t.Elem())(r, v.Elem())
}
|
|
||||||
|
|
||||||
// get decoder from cache
func getDecoder(t reflect.Type) decodeFunc {
	if f, ok := decodeFuncCache.Load(t); ok {
		return f.(decodeFunc)
	}

	// Cache miss: build the decoder and publish it. Two goroutines may
	// race here and both build; the duplicate work is harmless because
	// decoderFunc is deterministic and the last Store simply wins.
	f := decoderFunc(t)
	decodeFuncCache.Store(t, f)
	return f
}
|
|
||||||
|
|
||||||
// unmarshalableInterface is the reflected Unmarshalable interface, used to
// detect self-decoding types.
var unmarshalableInterface = reflect.TypeOf((*Unmarshalable)(nil)).Elem()

// decoderFunc builds (without caching) the decoder for type t. Dispatch
// order matters: self-unmarshaling types take precedence, then registered
// unions, then the built-in kind-based decoders.
func decoderFunc(t reflect.Type) decodeFunc {
	// A type whose pointer implements Unmarshalable decodes itself.
	if reflect.PtrTo(t).Implements(unmarshalableInterface) {
		return func(r *Reader, v reflect.Value) error {
			uv := v.Addr().Interface().(Unmarshalable)
			return uv.Unmarshal(r)
		}
	}

	// Interfaces that implement bare.Union decode via their tag table.
	if t.Kind() == reflect.Interface && t.Implements(unionInterface) {
		return decodeUnion(t)
	}

	switch t.Kind() {
	case reflect.Ptr:
		// A Go pointer models a BARE optional<T>.
		return decodeOptional(t.Elem())
	case reflect.Struct:
		return decodeStruct(t)
	case reflect.Array:
		return decodeArray(t)
	case reflect.Slice:
		return decodeSlice(t)
	case reflect.Map:
		return decodeMap(t)
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		return decodeUint
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return decodeInt
	case reflect.Float32, reflect.Float64:
		return decodeFloat
	case reflect.Bool:
		return decodeBool
	case reflect.String:
		return decodeString
	}

	// Anything else has no BARE representation; report it at decode time.
	return func(r *Reader, v reflect.Value) error {
		return &UnsupportedTypeError{v.Type()}
	}
}
|
|
||||||
|
|
||||||
func decodeOptional(t reflect.Type) decodeFunc {
|
|
||||||
return func(r *Reader, v reflect.Value) error {
|
|
||||||
s, err := r.ReadU8()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if s > 1 {
|
|
||||||
return fmt.Errorf("Invalid optional value: %#x", s)
|
|
||||||
}
|
|
||||||
|
|
||||||
if s == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
v.Set(reflect.New(t))
|
|
||||||
return getDecoder(t)(r, v.Elem())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// decodeStruct returns a decoder that reads every field of struct type t in
// declaration order. Fields tagged `bare:"-"` are skipped entirely (no bytes
// are consumed for them). Per-field decoders are resolved once, up front.
func decodeStruct(t reflect.Type) decodeFunc {
	n := t.NumField()
	decoders := make([]decodeFunc, n)
	for i := 0; i < n; i++ {
		field := t.Field(i)
		if field.Tag.Get("bare") == "-" {
			// Leave decoders[i] nil so the field is skipped below.
			continue
		}
		decoders[i] = getDecoder(field.Type)
	}

	return func(r *Reader, v reflect.Value) error {
		for i := 0; i < n; i++ {
			if decoders[i] == nil {
				continue
			}
			err := decoders[i](r, v.Field(i))
			if err != nil {
				return err
			}
		}
		return nil
	}
}
|
|
||||||
|
|
||||||
func decodeArray(t reflect.Type) decodeFunc {
|
|
||||||
f := getDecoder(t.Elem())
|
|
||||||
len := t.Len()
|
|
||||||
|
|
||||||
return func(r *Reader, v reflect.Value) error {
|
|
||||||
for i := 0; i < len; i++ {
|
|
||||||
err := f(r, v.Index(i))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func decodeSlice(t reflect.Type) decodeFunc {
|
|
||||||
elem := t.Elem()
|
|
||||||
f := getDecoder(elem)
|
|
||||||
|
|
||||||
return func(r *Reader, v reflect.Value) error {
|
|
||||||
len, err := r.ReadUint()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if len > maxArrayLength {
|
|
||||||
return fmt.Errorf("Array length %d exceeds configured limit of %d", len, maxArrayLength)
|
|
||||||
}
|
|
||||||
|
|
||||||
v.Set(reflect.MakeSlice(t, int(len), int(len)))
|
|
||||||
|
|
||||||
for i := 0; i < int(len); i++ {
|
|
||||||
if err := f(r, v.Index(i)); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// decodeMap returns a decoder for a Go map type: a varint entry count
// followed by that many key/value pairs. The count is bounded by the
// configured maximum, and duplicate keys are rejected as malformed input.
func decodeMap(t reflect.Type) decodeFunc {
	keyType := t.Key()
	keyf := getDecoder(keyType)

	valueType := t.Elem()
	valf := getDecoder(valueType)

	return func(r *Reader, v reflect.Value) error {
		size, err := r.ReadUint()
		if err != nil {
			return err
		}

		if size > maxMapSize {
			return fmt.Errorf("Map size %d exceeds configured limit of %d", size, maxMapSize)
		}

		v.Set(reflect.MakeMapWithSize(t, int(size)))

		// Scratch key/value cells reused across iterations; SetMapIndex
		// copies them into the map, so reuse is safe.
		key := reflect.New(keyType).Elem()
		value := reflect.New(valueType).Elem()

		for i := uint64(0); i < size; i++ {
			if err := keyf(r, key); err != nil {
				return err
			}

			// A prior entry under this key means the message encodes
			// the same key twice, which BARE forbids.
			if v.MapIndex(key).Kind() > reflect.Invalid {
				return fmt.Errorf("Encountered duplicate map key: %v", key.Interface())
			}

			if err := valf(r, value); err != nil {
				return err
			}

			v.SetMapIndex(key, value)
		}
		return nil
	}
}
|
|
||||||
|
|
||||||
// decodeUnion returns a decoder for a registered union interface type t.
// The wire format is a varint tag followed by the member's encoding; the
// decoded member is stored into v as a pointer to its concrete type.
func decodeUnion(t reflect.Type) decodeFunc {
	ut, ok := unionRegistry[t]
	if !ok {
		// Defer the error to decode time so merely building decoders
		// for unregistered unions does not fail.
		return func(r *Reader, v reflect.Value) error {
			return fmt.Errorf("Union type %s is not registered", t.Name())
		}
	}

	// Pre-build one decoder per registered tag.
	decoders := make(map[uint64]decodeFunc)
	for tag, t := range ut.types {
		t := t // capture per-iteration copy for the closure below
		f := getDecoder(t)

		decoders[tag] = func(r *Reader, v reflect.Value) error {
			nv := reflect.New(t)
			if err := f(r, nv.Elem()); err != nil {
				return err
			}

			v.Set(nv)
			return nil
		}
	}

	return func(r *Reader, v reflect.Value) error {
		tag, err := r.ReadUint()
		if err != nil {
			return err
		}

		if f, ok := decoders[tag]; ok {
			return f(r, v)
		}

		return fmt.Errorf("Invalid union tag %d for type %s", tag, t.Name())
	}
}
|
|
||||||
|
|
||||||
// decodeUint decodes an unsigned integer into v. The named Uint type uses
// variable-length encoding; the fixed-width uintN types use little-endian
// fixed encodings. Panics if v is not an unsigned integer (programmer bug).
func decodeUint(r *Reader, v reflect.Value) error {
	var err error
	switch getIntKind(v.Type()) {
	case reflect.Uint:
		var u uint64
		u, err = r.ReadUint()
		v.SetUint(u)

	case reflect.Uint8:
		var u uint8
		u, err = r.ReadU8()
		v.SetUint(uint64(u))

	case reflect.Uint16:
		var u uint16
		u, err = r.ReadU16()
		v.SetUint(uint64(u))
	case reflect.Uint32:
		var u uint32
		u, err = r.ReadU32()
		v.SetUint(uint64(u))

	case reflect.Uint64:
		var u uint64
		u, err = r.ReadU64()
		v.SetUint(uint64(u))

	default:
		panic("not an uint")
	}

	return err
}
|
|
||||||
|
|
||||||
// decodeInt decodes a signed integer into v. The named Int type uses
// variable-length (zig-zag) encoding; the fixed-width intN types use
// little-endian fixed encodings. Panics if v is not a signed integer.
func decodeInt(r *Reader, v reflect.Value) error {
	var err error
	switch getIntKind(v.Type()) {
	case reflect.Int:
		var i int64
		i, err = r.ReadInt()
		v.SetInt(i)

	case reflect.Int8:
		var i int8
		i, err = r.ReadI8()
		v.SetInt(int64(i))

	case reflect.Int16:
		var i int16
		i, err = r.ReadI16()
		v.SetInt(int64(i))
	case reflect.Int32:
		var i int32
		i, err = r.ReadI32()
		v.SetInt(int64(i))

	case reflect.Int64:
		var i int64
		i, err = r.ReadI64()
		v.SetInt(int64(i))

	default:
		panic("not an int")
	}

	return err
}
|
|
||||||
|
|
||||||
func decodeFloat(r *Reader, v reflect.Value) error {
|
|
||||||
var err error
|
|
||||||
switch v.Type().Kind() {
|
|
||||||
case reflect.Float32:
|
|
||||||
var f float32
|
|
||||||
f, err = r.ReadF32()
|
|
||||||
v.SetFloat(float64(f))
|
|
||||||
case reflect.Float64:
|
|
||||||
var f float64
|
|
||||||
f, err = r.ReadF64()
|
|
||||||
v.SetFloat(f)
|
|
||||||
default:
|
|
||||||
panic("not a float")
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
func decodeBool(r *Reader, v reflect.Value) error {
|
|
||||||
b, err := r.ReadBool()
|
|
||||||
v.SetBool(b)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
func decodeString(r *Reader, v reflect.Value) error {
|
|
||||||
s, err := r.ReadString()
|
|
||||||
v.SetString(s)
|
|
||||||
return err
|
|
||||||
}
|
|
27
vendor/git.sr.ht/~sircmpwn/go-bare/varint.go
vendored
27
vendor/git.sr.ht/~sircmpwn/go-bare/varint.go
vendored
@ -1,27 +0,0 @@
|
|||||||
package bare
|
|
||||||
|
|
||||||
import (
|
|
||||||
"reflect"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Int is a variable-length encoded signed integer.
|
|
||||||
type Int int64
|
|
||||||
|
|
||||||
// Uint is a variable-length encoded unsigned integer.
|
|
||||||
type Uint uint64
|
|
||||||
|
|
||||||
var (
|
|
||||||
intType = reflect.TypeOf(Int(0))
|
|
||||||
uintType = reflect.TypeOf(Uint(0))
|
|
||||||
)
|
|
||||||
|
|
||||||
func getIntKind(t reflect.Type) reflect.Kind {
|
|
||||||
switch t {
|
|
||||||
case intType:
|
|
||||||
return reflect.Int
|
|
||||||
case uintType:
|
|
||||||
return reflect.Uint
|
|
||||||
default:
|
|
||||||
return t.Kind()
|
|
||||||
}
|
|
||||||
}
|
|
116
vendor/git.sr.ht/~sircmpwn/go-bare/writer.go
vendored
116
vendor/git.sr.ht/~sircmpwn/go-bare/writer.go
vendored
@ -1,116 +0,0 @@
|
|||||||
package bare
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/binary"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"math"
|
|
||||||
)
|
|
||||||
|
|
||||||
// A Writer for BARE primitive types.
|
|
||||||
type Writer struct {
|
|
||||||
base io.Writer
|
|
||||||
scratch [binary.MaxVarintLen64]byte
|
|
||||||
}
|
|
||||||
|
|
||||||
// Returns a new BARE primitive writer wrapping the given io.Writer.
|
|
||||||
func NewWriter(base io.Writer) *Writer {
|
|
||||||
return &Writer{base: base}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *Writer) WriteUint(i uint64) error {
|
|
||||||
n := binary.PutUvarint(w.scratch[:], i)
|
|
||||||
_, err := w.base.Write(w.scratch[:n])
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *Writer) WriteU8(i uint8) error {
|
|
||||||
return binary.Write(w.base, binary.LittleEndian, i)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *Writer) WriteU16(i uint16) error {
|
|
||||||
return binary.Write(w.base, binary.LittleEndian, i)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *Writer) WriteU32(i uint32) error {
|
|
||||||
return binary.Write(w.base, binary.LittleEndian, i)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *Writer) WriteU64(i uint64) error {
|
|
||||||
return binary.Write(w.base, binary.LittleEndian, i)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *Writer) WriteInt(i int64) error {
|
|
||||||
var buf [binary.MaxVarintLen64]byte
|
|
||||||
n := binary.PutVarint(buf[:], i)
|
|
||||||
_, err := w.base.Write(buf[:n])
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *Writer) WriteI8(i int8) error {
|
|
||||||
return binary.Write(w.base, binary.LittleEndian, i)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *Writer) WriteI16(i int16) error {
|
|
||||||
return binary.Write(w.base, binary.LittleEndian, i)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *Writer) WriteI32(i int32) error {
|
|
||||||
return binary.Write(w.base, binary.LittleEndian, i)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *Writer) WriteI64(i int64) error {
|
|
||||||
return binary.Write(w.base, binary.LittleEndian, i)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *Writer) WriteF32(f float32) error {
|
|
||||||
if math.IsNaN(float64(f)) {
|
|
||||||
return fmt.Errorf("NaN is not permitted in BARE floats")
|
|
||||||
}
|
|
||||||
return binary.Write(w.base, binary.LittleEndian, f)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *Writer) WriteF64(f float64) error {
|
|
||||||
if math.IsNaN(f) {
|
|
||||||
return fmt.Errorf("NaN is not permitted in BARE floats")
|
|
||||||
}
|
|
||||||
return binary.Write(w.base, binary.LittleEndian, f)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *Writer) WriteBool(b bool) error {
|
|
||||||
return binary.Write(w.base, binary.LittleEndian, b)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *Writer) WriteString(str string) error {
|
|
||||||
return w.WriteData([]byte(str))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Writes a fixed amount of arbitrary data, defined by the length of the slice.
|
|
||||||
func (w *Writer) WriteDataFixed(data []byte) error {
|
|
||||||
var amt int = 0
|
|
||||||
for amt < len(data) {
|
|
||||||
n, err := w.base.Write(data[amt:])
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
amt += n
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Writes arbitrary data whose length is encoded into the message.
|
|
||||||
func (w *Writer) WriteData(data []byte) error {
|
|
||||||
err := w.WriteUint(uint64(len(data)))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
var amt int = 0
|
|
||||||
for amt < len(data) {
|
|
||||||
n, err := w.base.Write(data[amt:])
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
amt += n
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
20
vendor/github.com/beorn7/perks/LICENSE
generated
vendored
20
vendor/github.com/beorn7/perks/LICENSE
generated
vendored
@ -1,20 +0,0 @@
|
|||||||
Copyright (C) 2013 Blake Mizerany
|
|
||||||
|
|
||||||
Permission is hereby granted, free of charge, to any person obtaining
|
|
||||||
a copy of this software and associated documentation files (the
|
|
||||||
"Software"), to deal in the Software without restriction, including
|
|
||||||
without limitation the rights to use, copy, modify, merge, publish,
|
|
||||||
distribute, sublicense, and/or sell copies of the Software, and to
|
|
||||||
permit persons to whom the Software is furnished to do so, subject to
|
|
||||||
the following conditions:
|
|
||||||
|
|
||||||
The above copyright notice and this permission notice shall be
|
|
||||||
included in all copies or substantial portions of the Software.
|
|
||||||
|
|
||||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
|
||||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
|
||||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
|
||||||
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
|
|
||||||
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
|
||||||
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
|
||||||
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
|
2388
vendor/github.com/beorn7/perks/quantile/exampledata.txt
generated
vendored
2388
vendor/github.com/beorn7/perks/quantile/exampledata.txt
generated
vendored
File diff suppressed because it is too large
Load Diff
316
vendor/github.com/beorn7/perks/quantile/stream.go
generated
vendored
316
vendor/github.com/beorn7/perks/quantile/stream.go
generated
vendored
@ -1,316 +0,0 @@
|
|||||||
// Package quantile computes approximate quantiles over an unbounded data
|
|
||||||
// stream within low memory and CPU bounds.
|
|
||||||
//
|
|
||||||
// A small amount of accuracy is traded to achieve the above properties.
|
|
||||||
//
|
|
||||||
// Multiple streams can be merged before calling Query to generate a single set
|
|
||||||
// of results. This is meaningful when the streams represent the same type of
|
|
||||||
// data. See Merge and Samples.
|
|
||||||
//
|
|
||||||
// For more detailed information about the algorithm used, see:
|
|
||||||
//
|
|
||||||
// Effective Computation of Biased Quantiles over Data Streams
|
|
||||||
//
|
|
||||||
// http://www.cs.rutgers.edu/~muthu/bquant.pdf
|
|
||||||
package quantile
|
|
||||||
|
|
||||||
import (
|
|
||||||
"math"
|
|
||||||
"sort"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Sample holds an observed value and meta information for compression. JSON
// tags have been added for convenience.
type Sample struct {
	Value float64 `json:",string"`
	Width float64 `json:",string"`
	Delta float64 `json:",string"`
}

// Samples represents a slice of samples. It implements sort.Interface.
type Samples []Sample

func (a Samples) Len() int           { return len(a) }
func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value }
func (a Samples) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }

// invariant computes the allowable error at rank r for the current stream
// state; the chosen invariant determines which quantiles stay accurate.
type invariant func(s *stream, r float64) float64
|
|
||||||
|
|
||||||
// NewLowBiased returns an initialized Stream for low-biased quantiles
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
// error guarantees can still be given even for the lower ranks of the data
// distribution.
//
// The provided epsilon is a relative error, i.e. the true quantile of a value
// returned by a query is guaranteed to be within (1±Epsilon)*Quantile.
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
// properties.
func NewLowBiased(epsilon float64) *Stream {
	// Allowed error grows with rank r, so small quantiles stay accurate.
	ƒ := func(s *stream, r float64) float64 {
		return 2 * epsilon * r
	}
	return newStream(ƒ)
}

// NewHighBiased returns an initialized Stream for high-biased quantiles
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
// error guarantees can still be given even for the higher ranks of the data
// distribution.
//
// The provided epsilon is a relative error, i.e. the true quantile of a value
// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile).
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
// properties.
func NewHighBiased(epsilon float64) *Stream {
	// Allowed error grows with distance from the top rank (s.n - r), so
	// large quantiles stay accurate.
	ƒ := func(s *stream, r float64) float64 {
		return 2 * epsilon * (s.n - r)
	}
	return newStream(ƒ)
}
|
|
||||||
|
|
||||||
// NewTargeted returns an initialized Stream concerned with a particular set of
// quantile values that are supplied a priori. Knowing these a priori reduces
// space and computation time. The targets map maps the desired quantiles to
// their absolute errors, i.e. the true quantile of a value returned by a query
// is guaranteed to be within (Quantile±Epsilon).
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
func NewTargeted(targetMap map[float64]float64) *Stream {
	// Convert map to slice to avoid slow iterations on a map.
	// ƒ is called on the hot path, so converting the map to a slice
	// beforehand results in significant CPU savings.
	targets := targetMapToSlice(targetMap)

	ƒ := func(s *stream, r float64) float64 {
		// The allowed error at rank r is the minimum over all targets,
		// using the below-target or above-target formula from the paper.
		var m = math.MaxFloat64
		var f float64
		for _, t := range targets {
			if t.quantile*s.n <= r {
				f = (2 * t.epsilon * r) / t.quantile
			} else {
				f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile)
			}
			if f < m {
				m = f
			}
		}
		return m
	}
	return newStream(ƒ)
}
|
|
||||||
|
|
||||||
// target pairs one requested quantile with its allowed absolute error.
type target struct {
	quantile float64
	epsilon  float64
}

// targetMapToSlice flattens a quantile->epsilon map into a slice of target
// structs so the hot-path invariant can iterate without map overhead.
func targetMapToSlice(targetMap map[float64]float64) []target {
	out := make([]target, 0, len(targetMap))

	for q, e := range targetMap {
		out = append(out, target{quantile: q, epsilon: e})
	}

	return out
}
|
|
||||||
|
|
||||||
// Stream computes quantiles for a stream of float64s. It is not thread-safe by
// design. Take care when using across multiple goroutines.
type Stream struct {
	*stream        // the compressed summary
	b       Samples // buffer of not-yet-merged observations
	sorted  bool    // whether b is currently sorted
}

// newStream builds a Stream around the given error invariant, with a 500
// element insertion buffer (merges happen when the buffer fills).
func newStream(ƒ invariant) *Stream {
	x := &stream{ƒ: ƒ}
	return &Stream{x, make(Samples, 0, 500), true}
}

// Insert inserts v into the stream.
func (s *Stream) Insert(v float64) {
	s.insert(Sample{Value: v, Width: 1})
}

// insert buffers one sample and flushes into the summary when the buffer
// reaches capacity.
func (s *Stream) insert(sample Sample) {
	s.b = append(s.b, sample)
	s.sorted = false
	if len(s.b) == cap(s.b) {
		s.flush()
	}
}
|
|
||||||
|
|
||||||
// Query returns the computed qth percentiles value. If s was created with
// NewTargeted, and q is not in the set of quantiles provided a priori, Query
// will return an unspecified result.
func (s *Stream) Query(q float64) float64 {
	if !s.flushed() {
		// Fast path when there hasn't been enough data for a flush;
		// this also yields better accuracy for small sets of data.
		l := len(s.b)
		if l == 0 {
			return 0
		}
		// Rank of the qth percentile in the sorted buffer (1-based,
		// then shifted down to a 0-based index).
		i := int(math.Ceil(float64(l) * q))
		if i > 0 {
			i -= 1
		}
		s.maybeSort()
		return s.b[i].Value
	}
	s.flush()
	return s.stream.query(q)
}
|
|
||||||
|
|
||||||
// Merge merges samples into the underlying streams samples. This is handy when
// merging multiple streams from separate threads, database shards, etc.
//
// ATTENTION: This method is broken and does not yield correct results. The
// underlying algorithm is not capable of merging streams correctly.
func (s *Stream) Merge(samples Samples) {
	sort.Sort(samples)
	s.stream.merge(samples)
}

// Reset reinitializes and clears the list reusing the samples buffer memory.
func (s *Stream) Reset() {
	s.stream.reset()
	s.b = s.b[:0]
}

// Samples returns stream samples held by s.
func (s *Stream) Samples() Samples {
	if !s.flushed() {
		return s.b
	}
	s.flush()
	return s.stream.samples()
}

// Count returns the total number of samples observed in the stream
// since initialization.
func (s *Stream) Count() int {
	return len(s.b) + s.stream.count()
}

// flush sorts the insertion buffer, merges it into the summary, and empties
// the buffer while keeping its backing storage.
func (s *Stream) flush() {
	s.maybeSort()
	s.stream.merge(s.b)
	s.b = s.b[:0]
}

// maybeSort sorts the insertion buffer only if it is not already sorted.
func (s *Stream) maybeSort() {
	if !s.sorted {
		s.sorted = true
		sort.Sort(s.b)
	}
}

// flushed reports whether any samples have been merged into the summary yet.
func (s *Stream) flushed() bool {
	return len(s.stream.l) > 0
}
|
|
||||||
|
|
||||||
// stream is the compressed quantile summary underlying Stream.
type stream struct {
	n float64   // total observed weight
	l []Sample  // summary entries, ordered ascending by Value
	ƒ invariant // error invariant in effect
}

// reset empties the summary while retaining its backing storage.
func (s *stream) reset() {
	s.l = s.l[:0]
	s.n = 0
}

// insert adds a single unit-weight observation to the summary.
func (s *stream) insert(v float64) {
	s.merge(Samples{{v, 1, 0}})
}

// merge folds a sorted batch of samples into the summary, keeping s.l
// ordered by Value, then compresses. r tracks the cumulative rank (weight)
// seen so far; i resumes scanning where the previous sample left off, which
// is why the input must be sorted.
func (s *stream) merge(samples Samples) {
	// TODO(beorn7): This tries to merge not only individual samples, but
	// whole summaries. The paper doesn't mention merging summaries at
	// all. Unittests show that the merging is inaccurate. Find out how to
	// do merges properly.
	var r float64
	i := 0
	for _, sample := range samples {
		for ; i < len(s.l); i++ {
			c := s.l[i]
			if c.Value > sample.Value {
				// Insert at position i.
				s.l = append(s.l, Sample{})
				copy(s.l[i+1:], s.l[i:])
				s.l[i] = Sample{
					sample.Value,
					sample.Width,
					math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1),
					// TODO(beorn7): How to calculate delta correctly?
				}
				i++
				goto inserted
			}
			r += c.Width
		}
		// Larger than everything seen so far: append at the end.
		s.l = append(s.l, Sample{sample.Value, sample.Width, 0})
		i++
	inserted:
		s.n += sample.Width
		r += sample.Width
	}
	s.compress()
}
|
|
||||||
|
|
||||||
// count reports the total observed weight, truncated to an int.
func (s *stream) count() int {
	return int(s.n)
}

// query returns the value whose rank best matches quantile q, per the
// Cormode et al. biased-quantiles query rule: find the first entry whose
// rank bound exceeds the target rank (plus half the allowed error) and
// return its predecessor.
func (s *stream) query(q float64) float64 {
	t := math.Ceil(q * s.n)
	t += math.Ceil(s.ƒ(s, t) / 2)
	p := s.l[0]
	var r float64
	for _, c := range s.l[1:] {
		r += p.Width
		if r+c.Width+c.Delta > t {
			return p.Value
		}
		p = c
	}
	return p.Value
}

// compress scans the summary from the end toward the front, merging each
// entry into its successor whenever the combined width stays within the
// error invariant, shrinking s.l in place.
func (s *stream) compress() {
	if len(s.l) < 2 {
		return
	}
	x := s.l[len(s.l)-1]
	xi := len(s.l) - 1
	r := s.n - 1 - x.Width

	for i := len(s.l) - 2; i >= 0; i-- {
		c := s.l[i]
		if c.Width+x.Width+x.Delta <= s.ƒ(s, r) {
			// Merge c into its successor x.
			x.Width += c.Width
			s.l[xi] = x
			// Remove element at i.
			copy(s.l[i:], s.l[i+1:])
			s.l = s.l[:len(s.l)-1]
			xi -= 1
		} else {
			x = c
			xi = i
		}
		r -= c.Width
	}
}
|
|
||||||
|
|
||||||
func (s *stream) samples() Samples {
|
|
||||||
samples := make(Samples, len(s.l))
|
|
||||||
copy(samples, s.l)
|
|
||||||
return samples
|
|
||||||
}
|
|
22
vendor/github.com/cespare/xxhash/v2/LICENSE.txt
generated
vendored
22
vendor/github.com/cespare/xxhash/v2/LICENSE.txt
generated
vendored
@ -1,22 +0,0 @@
|
|||||||
Copyright (c) 2016 Caleb Spare
|
|
||||||
|
|
||||||
MIT License
|
|
||||||
|
|
||||||
Permission is hereby granted, free of charge, to any person obtaining
|
|
||||||
a copy of this software and associated documentation files (the
|
|
||||||
"Software"), to deal in the Software without restriction, including
|
|
||||||
without limitation the rights to use, copy, modify, merge, publish,
|
|
||||||
distribute, sublicense, and/or sell copies of the Software, and to
|
|
||||||
permit persons to whom the Software is furnished to do so, subject to
|
|
||||||
the following conditions:
|
|
||||||
|
|
||||||
The above copyright notice and this permission notice shall be
|
|
||||||
included in all copies or substantial portions of the Software.
|
|
||||||
|
|
||||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
|
||||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
|
||||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
|
||||||
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
|
|
||||||
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
|
||||||
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
|
||||||
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
|
69
vendor/github.com/cespare/xxhash/v2/README.md
generated
vendored
69
vendor/github.com/cespare/xxhash/v2/README.md
generated
vendored
@ -1,69 +0,0 @@
|
|||||||
# xxhash
|
|
||||||
|
|
||||||
[](https://pkg.go.dev/github.com/cespare/xxhash/v2)
|
|
||||||
[](https://github.com/cespare/xxhash/actions/workflows/test.yml)
|
|
||||||
|
|
||||||
xxhash is a Go implementation of the 64-bit
|
|
||||||
[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
|
|
||||||
high-quality hashing algorithm that is much faster than anything in the Go
|
|
||||||
standard library.
|
|
||||||
|
|
||||||
This package provides a straightforward API:
|
|
||||||
|
|
||||||
```
|
|
||||||
func Sum64(b []byte) uint64
|
|
||||||
func Sum64String(s string) uint64
|
|
||||||
type Digest struct{ ... }
|
|
||||||
func New() *Digest
|
|
||||||
```
|
|
||||||
|
|
||||||
The `Digest` type implements hash.Hash64. Its key methods are:
|
|
||||||
|
|
||||||
```
|
|
||||||
func (*Digest) Write([]byte) (int, error)
|
|
||||||
func (*Digest) WriteString(string) (int, error)
|
|
||||||
func (*Digest) Sum64() uint64
|
|
||||||
```
|
|
||||||
|
|
||||||
This implementation provides a fast pure-Go implementation and an even faster
|
|
||||||
assembly implementation for amd64.
|
|
||||||
|
|
||||||
## Compatibility
|
|
||||||
|
|
||||||
This package is in a module and the latest code is in version 2 of the module.
|
|
||||||
You need a version of Go with at least "minimal module compatibility" to use
|
|
||||||
github.com/cespare/xxhash/v2:
|
|
||||||
|
|
||||||
* 1.9.7+ for Go 1.9
|
|
||||||
* 1.10.3+ for Go 1.10
|
|
||||||
* Go 1.11 or later
|
|
||||||
|
|
||||||
I recommend using the latest release of Go.
|
|
||||||
|
|
||||||
## Benchmarks
|
|
||||||
|
|
||||||
Here are some quick benchmarks comparing the pure-Go and assembly
|
|
||||||
implementations of Sum64.
|
|
||||||
|
|
||||||
| input size | purego | asm |
|
|
||||||
| --- | --- | --- |
|
|
||||||
| 5 B | 979.66 MB/s | 1291.17 MB/s |
|
|
||||||
| 100 B | 7475.26 MB/s | 7973.40 MB/s |
|
|
||||||
| 4 KB | 17573.46 MB/s | 17602.65 MB/s |
|
|
||||||
| 10 MB | 17131.46 MB/s | 17142.16 MB/s |
|
|
||||||
|
|
||||||
These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using
|
|
||||||
the following commands under Go 1.11.2:
|
|
||||||
|
|
||||||
```
|
|
||||||
$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes'
|
|
||||||
$ go test -benchtime 10s -bench '/xxhash,direct,bytes'
|
|
||||||
```
|
|
||||||
|
|
||||||
## Projects using this package
|
|
||||||
|
|
||||||
- [InfluxDB](https://github.com/influxdata/influxdb)
|
|
||||||
- [Prometheus](https://github.com/prometheus/prometheus)
|
|
||||||
- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics)
|
|
||||||
- [FreeCache](https://github.com/coocood/freecache)
|
|
||||||
- [FastCache](https://github.com/VictoriaMetrics/fastcache)
|
|
235
vendor/github.com/cespare/xxhash/v2/xxhash.go
generated
vendored
235
vendor/github.com/cespare/xxhash/v2/xxhash.go
generated
vendored
@ -1,235 +0,0 @@
|
|||||||
// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described
|
|
||||||
// at http://cyan4973.github.io/xxHash/.
|
|
||||||
package xxhash
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/binary"
|
|
||||||
"errors"
|
|
||||||
"math/bits"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
prime1 uint64 = 11400714785074694791
|
|
||||||
prime2 uint64 = 14029467366897019727
|
|
||||||
prime3 uint64 = 1609587929392839161
|
|
||||||
prime4 uint64 = 9650029242287828579
|
|
||||||
prime5 uint64 = 2870177450012600261
|
|
||||||
)
|
|
||||||
|
|
||||||
// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where
|
|
||||||
// possible in the Go code is worth a small (but measurable) performance boost
|
|
||||||
// by avoiding some MOVQs. Vars are needed for the asm and also are useful for
|
|
||||||
// convenience in the Go code in a few places where we need to intentionally
|
|
||||||
// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the
|
|
||||||
// result overflows a uint64).
|
|
||||||
var (
|
|
||||||
prime1v = prime1
|
|
||||||
prime2v = prime2
|
|
||||||
prime3v = prime3
|
|
||||||
prime4v = prime4
|
|
||||||
prime5v = prime5
|
|
||||||
)
|
|
||||||
|
|
||||||
// Digest implements hash.Hash64.
|
|
||||||
type Digest struct {
|
|
||||||
v1 uint64
|
|
||||||
v2 uint64
|
|
||||||
v3 uint64
|
|
||||||
v4 uint64
|
|
||||||
total uint64
|
|
||||||
mem [32]byte
|
|
||||||
n int // how much of mem is used
|
|
||||||
}
|
|
||||||
|
|
||||||
// New creates a new Digest that computes the 64-bit xxHash algorithm.
|
|
||||||
func New() *Digest {
|
|
||||||
var d Digest
|
|
||||||
d.Reset()
|
|
||||||
return &d
|
|
||||||
}
|
|
||||||
|
|
||||||
// Reset clears the Digest's state so that it can be reused.
|
|
||||||
func (d *Digest) Reset() {
|
|
||||||
d.v1 = prime1v + prime2
|
|
||||||
d.v2 = prime2
|
|
||||||
d.v3 = 0
|
|
||||||
d.v4 = -prime1v
|
|
||||||
d.total = 0
|
|
||||||
d.n = 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// Size always returns 8 bytes.
|
|
||||||
func (d *Digest) Size() int { return 8 }
|
|
||||||
|
|
||||||
// BlockSize always returns 32 bytes.
|
|
||||||
func (d *Digest) BlockSize() int { return 32 }
|
|
||||||
|
|
||||||
// Write adds more data to d. It always returns len(b), nil.
|
|
||||||
func (d *Digest) Write(b []byte) (n int, err error) {
|
|
||||||
n = len(b)
|
|
||||||
d.total += uint64(n)
|
|
||||||
|
|
||||||
if d.n+n < 32 {
|
|
||||||
// This new data doesn't even fill the current block.
|
|
||||||
copy(d.mem[d.n:], b)
|
|
||||||
d.n += n
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if d.n > 0 {
|
|
||||||
// Finish off the partial block.
|
|
||||||
copy(d.mem[d.n:], b)
|
|
||||||
d.v1 = round(d.v1, u64(d.mem[0:8]))
|
|
||||||
d.v2 = round(d.v2, u64(d.mem[8:16]))
|
|
||||||
d.v3 = round(d.v3, u64(d.mem[16:24]))
|
|
||||||
d.v4 = round(d.v4, u64(d.mem[24:32]))
|
|
||||||
b = b[32-d.n:]
|
|
||||||
d.n = 0
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(b) >= 32 {
|
|
||||||
// One or more full blocks left.
|
|
||||||
nw := writeBlocks(d, b)
|
|
||||||
b = b[nw:]
|
|
||||||
}
|
|
||||||
|
|
||||||
// Store any remaining partial block.
|
|
||||||
copy(d.mem[:], b)
|
|
||||||
d.n = len(b)
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sum appends the current hash to b and returns the resulting slice.
|
|
||||||
func (d *Digest) Sum(b []byte) []byte {
|
|
||||||
s := d.Sum64()
|
|
||||||
return append(
|
|
||||||
b,
|
|
||||||
byte(s>>56),
|
|
||||||
byte(s>>48),
|
|
||||||
byte(s>>40),
|
|
||||||
byte(s>>32),
|
|
||||||
byte(s>>24),
|
|
||||||
byte(s>>16),
|
|
||||||
byte(s>>8),
|
|
||||||
byte(s),
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sum64 returns the current hash.
|
|
||||||
func (d *Digest) Sum64() uint64 {
|
|
||||||
var h uint64
|
|
||||||
|
|
||||||
if d.total >= 32 {
|
|
||||||
v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
|
|
||||||
h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
|
|
||||||
h = mergeRound(h, v1)
|
|
||||||
h = mergeRound(h, v2)
|
|
||||||
h = mergeRound(h, v3)
|
|
||||||
h = mergeRound(h, v4)
|
|
||||||
} else {
|
|
||||||
h = d.v3 + prime5
|
|
||||||
}
|
|
||||||
|
|
||||||
h += d.total
|
|
||||||
|
|
||||||
i, end := 0, d.n
|
|
||||||
for ; i+8 <= end; i += 8 {
|
|
||||||
k1 := round(0, u64(d.mem[i:i+8]))
|
|
||||||
h ^= k1
|
|
||||||
h = rol27(h)*prime1 + prime4
|
|
||||||
}
|
|
||||||
if i+4 <= end {
|
|
||||||
h ^= uint64(u32(d.mem[i:i+4])) * prime1
|
|
||||||
h = rol23(h)*prime2 + prime3
|
|
||||||
i += 4
|
|
||||||
}
|
|
||||||
for i < end {
|
|
||||||
h ^= uint64(d.mem[i]) * prime5
|
|
||||||
h = rol11(h) * prime1
|
|
||||||
i++
|
|
||||||
}
|
|
||||||
|
|
||||||
h ^= h >> 33
|
|
||||||
h *= prime2
|
|
||||||
h ^= h >> 29
|
|
||||||
h *= prime3
|
|
||||||
h ^= h >> 32
|
|
||||||
|
|
||||||
return h
|
|
||||||
}
|
|
||||||
|
|
||||||
const (
|
|
||||||
magic = "xxh\x06"
|
|
||||||
marshaledSize = len(magic) + 8*5 + 32
|
|
||||||
)
|
|
||||||
|
|
||||||
// MarshalBinary implements the encoding.BinaryMarshaler interface.
|
|
||||||
func (d *Digest) MarshalBinary() ([]byte, error) {
|
|
||||||
b := make([]byte, 0, marshaledSize)
|
|
||||||
b = append(b, magic...)
|
|
||||||
b = appendUint64(b, d.v1)
|
|
||||||
b = appendUint64(b, d.v2)
|
|
||||||
b = appendUint64(b, d.v3)
|
|
||||||
b = appendUint64(b, d.v4)
|
|
||||||
b = appendUint64(b, d.total)
|
|
||||||
b = append(b, d.mem[:d.n]...)
|
|
||||||
b = b[:len(b)+len(d.mem)-d.n]
|
|
||||||
return b, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
|
|
||||||
func (d *Digest) UnmarshalBinary(b []byte) error {
|
|
||||||
if len(b) < len(magic) || string(b[:len(magic)]) != magic {
|
|
||||||
return errors.New("xxhash: invalid hash state identifier")
|
|
||||||
}
|
|
||||||
if len(b) != marshaledSize {
|
|
||||||
return errors.New("xxhash: invalid hash state size")
|
|
||||||
}
|
|
||||||
b = b[len(magic):]
|
|
||||||
b, d.v1 = consumeUint64(b)
|
|
||||||
b, d.v2 = consumeUint64(b)
|
|
||||||
b, d.v3 = consumeUint64(b)
|
|
||||||
b, d.v4 = consumeUint64(b)
|
|
||||||
b, d.total = consumeUint64(b)
|
|
||||||
copy(d.mem[:], b)
|
|
||||||
d.n = int(d.total % uint64(len(d.mem)))
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func appendUint64(b []byte, x uint64) []byte {
|
|
||||||
var a [8]byte
|
|
||||||
binary.LittleEndian.PutUint64(a[:], x)
|
|
||||||
return append(b, a[:]...)
|
|
||||||
}
|
|
||||||
|
|
||||||
func consumeUint64(b []byte) ([]byte, uint64) {
|
|
||||||
x := u64(b)
|
|
||||||
return b[8:], x
|
|
||||||
}
|
|
||||||
|
|
||||||
func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) }
|
|
||||||
func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) }
|
|
||||||
|
|
||||||
func round(acc, input uint64) uint64 {
|
|
||||||
acc += input * prime2
|
|
||||||
acc = rol31(acc)
|
|
||||||
acc *= prime1
|
|
||||||
return acc
|
|
||||||
}
|
|
||||||
|
|
||||||
func mergeRound(acc, val uint64) uint64 {
|
|
||||||
val = round(0, val)
|
|
||||||
acc ^= val
|
|
||||||
acc = acc*prime1 + prime4
|
|
||||||
return acc
|
|
||||||
}
|
|
||||||
|
|
||||||
func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) }
|
|
||||||
func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) }
|
|
||||||
func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) }
|
|
||||||
func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) }
|
|
||||||
func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) }
|
|
||||||
func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) }
|
|
||||||
func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) }
|
|
||||||
func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) }
|
|
13
vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go
generated
vendored
13
vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go
generated
vendored
@ -1,13 +0,0 @@
|
|||||||
// +build !appengine
|
|
||||||
// +build gc
|
|
||||||
// +build !purego
|
|
||||||
|
|
||||||
package xxhash
|
|
||||||
|
|
||||||
// Sum64 computes the 64-bit xxHash digest of b.
|
|
||||||
//
|
|
||||||
//go:noescape
|
|
||||||
func Sum64(b []byte) uint64
|
|
||||||
|
|
||||||
//go:noescape
|
|
||||||
func writeBlocks(d *Digest, b []byte) int
|
|
215
vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s
generated
vendored
215
vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s
generated
vendored
@ -1,215 +0,0 @@
|
|||||||
// +build !appengine
|
|
||||||
// +build gc
|
|
||||||
// +build !purego
|
|
||||||
|
|
||||||
#include "textflag.h"
|
|
||||||
|
|
||||||
// Register allocation:
|
|
||||||
// AX h
|
|
||||||
// SI pointer to advance through b
|
|
||||||
// DX n
|
|
||||||
// BX loop end
|
|
||||||
// R8 v1, k1
|
|
||||||
// R9 v2
|
|
||||||
// R10 v3
|
|
||||||
// R11 v4
|
|
||||||
// R12 tmp
|
|
||||||
// R13 prime1v
|
|
||||||
// R14 prime2v
|
|
||||||
// DI prime4v
|
|
||||||
|
|
||||||
// round reads from and advances the buffer pointer in SI.
|
|
||||||
// It assumes that R13 has prime1v and R14 has prime2v.
|
|
||||||
#define round(r) \
|
|
||||||
MOVQ (SI), R12 \
|
|
||||||
ADDQ $8, SI \
|
|
||||||
IMULQ R14, R12 \
|
|
||||||
ADDQ R12, r \
|
|
||||||
ROLQ $31, r \
|
|
||||||
IMULQ R13, r
|
|
||||||
|
|
||||||
// mergeRound applies a merge round on the two registers acc and val.
|
|
||||||
// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v.
|
|
||||||
#define mergeRound(acc, val) \
|
|
||||||
IMULQ R14, val \
|
|
||||||
ROLQ $31, val \
|
|
||||||
IMULQ R13, val \
|
|
||||||
XORQ val, acc \
|
|
||||||
IMULQ R13, acc \
|
|
||||||
ADDQ DI, acc
|
|
||||||
|
|
||||||
// func Sum64(b []byte) uint64
|
|
||||||
TEXT ·Sum64(SB), NOSPLIT, $0-32
|
|
||||||
// Load fixed primes.
|
|
||||||
MOVQ ·prime1v(SB), R13
|
|
||||||
MOVQ ·prime2v(SB), R14
|
|
||||||
MOVQ ·prime4v(SB), DI
|
|
||||||
|
|
||||||
// Load slice.
|
|
||||||
MOVQ b_base+0(FP), SI
|
|
||||||
MOVQ b_len+8(FP), DX
|
|
||||||
LEAQ (SI)(DX*1), BX
|
|
||||||
|
|
||||||
// The first loop limit will be len(b)-32.
|
|
||||||
SUBQ $32, BX
|
|
||||||
|
|
||||||
// Check whether we have at least one block.
|
|
||||||
CMPQ DX, $32
|
|
||||||
JLT noBlocks
|
|
||||||
|
|
||||||
// Set up initial state (v1, v2, v3, v4).
|
|
||||||
MOVQ R13, R8
|
|
||||||
ADDQ R14, R8
|
|
||||||
MOVQ R14, R9
|
|
||||||
XORQ R10, R10
|
|
||||||
XORQ R11, R11
|
|
||||||
SUBQ R13, R11
|
|
||||||
|
|
||||||
// Loop until SI > BX.
|
|
||||||
blockLoop:
|
|
||||||
round(R8)
|
|
||||||
round(R9)
|
|
||||||
round(R10)
|
|
||||||
round(R11)
|
|
||||||
|
|
||||||
CMPQ SI, BX
|
|
||||||
JLE blockLoop
|
|
||||||
|
|
||||||
MOVQ R8, AX
|
|
||||||
ROLQ $1, AX
|
|
||||||
MOVQ R9, R12
|
|
||||||
ROLQ $7, R12
|
|
||||||
ADDQ R12, AX
|
|
||||||
MOVQ R10, R12
|
|
||||||
ROLQ $12, R12
|
|
||||||
ADDQ R12, AX
|
|
||||||
MOVQ R11, R12
|
|
||||||
ROLQ $18, R12
|
|
||||||
ADDQ R12, AX
|
|
||||||
|
|
||||||
mergeRound(AX, R8)
|
|
||||||
mergeRound(AX, R9)
|
|
||||||
mergeRound(AX, R10)
|
|
||||||
mergeRound(AX, R11)
|
|
||||||
|
|
||||||
JMP afterBlocks
|
|
||||||
|
|
||||||
noBlocks:
|
|
||||||
MOVQ ·prime5v(SB), AX
|
|
||||||
|
|
||||||
afterBlocks:
|
|
||||||
ADDQ DX, AX
|
|
||||||
|
|
||||||
// Right now BX has len(b)-32, and we want to loop until SI > len(b)-8.
|
|
||||||
ADDQ $24, BX
|
|
||||||
|
|
||||||
CMPQ SI, BX
|
|
||||||
JG fourByte
|
|
||||||
|
|
||||||
wordLoop:
|
|
||||||
// Calculate k1.
|
|
||||||
MOVQ (SI), R8
|
|
||||||
ADDQ $8, SI
|
|
||||||
IMULQ R14, R8
|
|
||||||
ROLQ $31, R8
|
|
||||||
IMULQ R13, R8
|
|
||||||
|
|
||||||
XORQ R8, AX
|
|
||||||
ROLQ $27, AX
|
|
||||||
IMULQ R13, AX
|
|
||||||
ADDQ DI, AX
|
|
||||||
|
|
||||||
CMPQ SI, BX
|
|
||||||
JLE wordLoop
|
|
||||||
|
|
||||||
fourByte:
|
|
||||||
ADDQ $4, BX
|
|
||||||
CMPQ SI, BX
|
|
||||||
JG singles
|
|
||||||
|
|
||||||
MOVL (SI), R8
|
|
||||||
ADDQ $4, SI
|
|
||||||
IMULQ R13, R8
|
|
||||||
XORQ R8, AX
|
|
||||||
|
|
||||||
ROLQ $23, AX
|
|
||||||
IMULQ R14, AX
|
|
||||||
ADDQ ·prime3v(SB), AX
|
|
||||||
|
|
||||||
singles:
|
|
||||||
ADDQ $4, BX
|
|
||||||
CMPQ SI, BX
|
|
||||||
JGE finalize
|
|
||||||
|
|
||||||
singlesLoop:
|
|
||||||
MOVBQZX (SI), R12
|
|
||||||
ADDQ $1, SI
|
|
||||||
IMULQ ·prime5v(SB), R12
|
|
||||||
XORQ R12, AX
|
|
||||||
|
|
||||||
ROLQ $11, AX
|
|
||||||
IMULQ R13, AX
|
|
||||||
|
|
||||||
CMPQ SI, BX
|
|
||||||
JL singlesLoop
|
|
||||||
|
|
||||||
finalize:
|
|
||||||
MOVQ AX, R12
|
|
||||||
SHRQ $33, R12
|
|
||||||
XORQ R12, AX
|
|
||||||
IMULQ R14, AX
|
|
||||||
MOVQ AX, R12
|
|
||||||
SHRQ $29, R12
|
|
||||||
XORQ R12, AX
|
|
||||||
IMULQ ·prime3v(SB), AX
|
|
||||||
MOVQ AX, R12
|
|
||||||
SHRQ $32, R12
|
|
||||||
XORQ R12, AX
|
|
||||||
|
|
||||||
MOVQ AX, ret+24(FP)
|
|
||||||
RET
|
|
||||||
|
|
||||||
// writeBlocks uses the same registers as above except that it uses AX to store
|
|
||||||
// the d pointer.
|
|
||||||
|
|
||||||
// func writeBlocks(d *Digest, b []byte) int
|
|
||||||
TEXT ·writeBlocks(SB), NOSPLIT, $0-40
|
|
||||||
// Load fixed primes needed for round.
|
|
||||||
MOVQ ·prime1v(SB), R13
|
|
||||||
MOVQ ·prime2v(SB), R14
|
|
||||||
|
|
||||||
// Load slice.
|
|
||||||
MOVQ b_base+8(FP), SI
|
|
||||||
MOVQ b_len+16(FP), DX
|
|
||||||
LEAQ (SI)(DX*1), BX
|
|
||||||
SUBQ $32, BX
|
|
||||||
|
|
||||||
// Load vN from d.
|
|
||||||
MOVQ d+0(FP), AX
|
|
||||||
MOVQ 0(AX), R8 // v1
|
|
||||||
MOVQ 8(AX), R9 // v2
|
|
||||||
MOVQ 16(AX), R10 // v3
|
|
||||||
MOVQ 24(AX), R11 // v4
|
|
||||||
|
|
||||||
// We don't need to check the loop condition here; this function is
|
|
||||||
// always called with at least one block of data to process.
|
|
||||||
blockLoop:
|
|
||||||
round(R8)
|
|
||||||
round(R9)
|
|
||||||
round(R10)
|
|
||||||
round(R11)
|
|
||||||
|
|
||||||
CMPQ SI, BX
|
|
||||||
JLE blockLoop
|
|
||||||
|
|
||||||
// Copy vN back to d.
|
|
||||||
MOVQ R8, 0(AX)
|
|
||||||
MOVQ R9, 8(AX)
|
|
||||||
MOVQ R10, 16(AX)
|
|
||||||
MOVQ R11, 24(AX)
|
|
||||||
|
|
||||||
// The number of bytes written is SI minus the old base pointer.
|
|
||||||
SUBQ b_base+8(FP), SI
|
|
||||||
MOVQ SI, ret+32(FP)
|
|
||||||
|
|
||||||
RET
|
|
76
vendor/github.com/cespare/xxhash/v2/xxhash_other.go
generated
vendored
76
vendor/github.com/cespare/xxhash/v2/xxhash_other.go
generated
vendored
@ -1,76 +0,0 @@
|
|||||||
// +build !amd64 appengine !gc purego
|
|
||||||
|
|
||||||
package xxhash
|
|
||||||
|
|
||||||
// Sum64 computes the 64-bit xxHash digest of b.
|
|
||||||
func Sum64(b []byte) uint64 {
|
|
||||||
// A simpler version would be
|
|
||||||
// d := New()
|
|
||||||
// d.Write(b)
|
|
||||||
// return d.Sum64()
|
|
||||||
// but this is faster, particularly for small inputs.
|
|
||||||
|
|
||||||
n := len(b)
|
|
||||||
var h uint64
|
|
||||||
|
|
||||||
if n >= 32 {
|
|
||||||
v1 := prime1v + prime2
|
|
||||||
v2 := prime2
|
|
||||||
v3 := uint64(0)
|
|
||||||
v4 := -prime1v
|
|
||||||
for len(b) >= 32 {
|
|
||||||
v1 = round(v1, u64(b[0:8:len(b)]))
|
|
||||||
v2 = round(v2, u64(b[8:16:len(b)]))
|
|
||||||
v3 = round(v3, u64(b[16:24:len(b)]))
|
|
||||||
v4 = round(v4, u64(b[24:32:len(b)]))
|
|
||||||
b = b[32:len(b):len(b)]
|
|
||||||
}
|
|
||||||
h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
|
|
||||||
h = mergeRound(h, v1)
|
|
||||||
h = mergeRound(h, v2)
|
|
||||||
h = mergeRound(h, v3)
|
|
||||||
h = mergeRound(h, v4)
|
|
||||||
} else {
|
|
||||||
h = prime5
|
|
||||||
}
|
|
||||||
|
|
||||||
h += uint64(n)
|
|
||||||
|
|
||||||
i, end := 0, len(b)
|
|
||||||
for ; i+8 <= end; i += 8 {
|
|
||||||
k1 := round(0, u64(b[i:i+8:len(b)]))
|
|
||||||
h ^= k1
|
|
||||||
h = rol27(h)*prime1 + prime4
|
|
||||||
}
|
|
||||||
if i+4 <= end {
|
|
||||||
h ^= uint64(u32(b[i:i+4:len(b)])) * prime1
|
|
||||||
h = rol23(h)*prime2 + prime3
|
|
||||||
i += 4
|
|
||||||
}
|
|
||||||
for ; i < end; i++ {
|
|
||||||
h ^= uint64(b[i]) * prime5
|
|
||||||
h = rol11(h) * prime1
|
|
||||||
}
|
|
||||||
|
|
||||||
h ^= h >> 33
|
|
||||||
h *= prime2
|
|
||||||
h ^= h >> 29
|
|
||||||
h *= prime3
|
|
||||||
h ^= h >> 32
|
|
||||||
|
|
||||||
return h
|
|
||||||
}
|
|
||||||
|
|
||||||
func writeBlocks(d *Digest, b []byte) int {
|
|
||||||
v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
|
|
||||||
n := len(b)
|
|
||||||
for len(b) >= 32 {
|
|
||||||
v1 = round(v1, u64(b[0:8:len(b)]))
|
|
||||||
v2 = round(v2, u64(b[8:16:len(b)]))
|
|
||||||
v3 = round(v3, u64(b[16:24:len(b)]))
|
|
||||||
v4 = round(v4, u64(b[24:32:len(b)]))
|
|
||||||
b = b[32:len(b):len(b)]
|
|
||||||
}
|
|
||||||
d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4
|
|
||||||
return n - len(b)
|
|
||||||
}
|
|
15
vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
generated
vendored
15
vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
generated
vendored
@ -1,15 +0,0 @@
|
|||||||
// +build appengine
|
|
||||||
|
|
||||||
// This file contains the safe implementations of otherwise unsafe-using code.
|
|
||||||
|
|
||||||
package xxhash
|
|
||||||
|
|
||||||
// Sum64String computes the 64-bit xxHash digest of s.
|
|
||||||
func Sum64String(s string) uint64 {
|
|
||||||
return Sum64([]byte(s))
|
|
||||||
}
|
|
||||||
|
|
||||||
// WriteString adds more data to d. It always returns len(s), nil.
|
|
||||||
func (d *Digest) WriteString(s string) (n int, err error) {
|
|
||||||
return d.Write([]byte(s))
|
|
||||||
}
|
|
57
vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
generated
vendored
57
vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
generated
vendored
@ -1,57 +0,0 @@
|
|||||||
// +build !appengine
|
|
||||||
|
|
||||||
// This file encapsulates usage of unsafe.
|
|
||||||
// xxhash_safe.go contains the safe implementations.
|
|
||||||
|
|
||||||
package xxhash
|
|
||||||
|
|
||||||
import (
|
|
||||||
"unsafe"
|
|
||||||
)
|
|
||||||
|
|
||||||
// In the future it's possible that compiler optimizations will make these
|
|
||||||
// XxxString functions unnecessary by realizing that calls such as
|
|
||||||
// Sum64([]byte(s)) don't need to copy s. See https://golang.org/issue/2205.
|
|
||||||
// If that happens, even if we keep these functions they can be replaced with
|
|
||||||
// the trivial safe code.
|
|
||||||
|
|
||||||
// NOTE: The usual way of doing an unsafe string-to-[]byte conversion is:
|
|
||||||
//
|
|
||||||
// var b []byte
|
|
||||||
// bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
|
|
||||||
// bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
|
|
||||||
// bh.Len = len(s)
|
|
||||||
// bh.Cap = len(s)
|
|
||||||
//
|
|
||||||
// Unfortunately, as of Go 1.15.3 the inliner's cost model assigns a high enough
|
|
||||||
// weight to this sequence of expressions that any function that uses it will
|
|
||||||
// not be inlined. Instead, the functions below use a different unsafe
|
|
||||||
// conversion designed to minimize the inliner weight and allow both to be
|
|
||||||
// inlined. There is also a test (TestInlining) which verifies that these are
|
|
||||||
// inlined.
|
|
||||||
//
|
|
||||||
// See https://github.com/golang/go/issues/42739 for discussion.
|
|
||||||
|
|
||||||
// Sum64String computes the 64-bit xxHash digest of s.
|
|
||||||
// It may be faster than Sum64([]byte(s)) by avoiding a copy.
|
|
||||||
func Sum64String(s string) uint64 {
|
|
||||||
b := *(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)}))
|
|
||||||
return Sum64(b)
|
|
||||||
}
|
|
||||||
|
|
||||||
// WriteString adds more data to d. It always returns len(s), nil.
|
|
||||||
// It may be faster than Write([]byte(s)) by avoiding a copy.
|
|
||||||
func (d *Digest) WriteString(s string) (n int, err error) {
|
|
||||||
d.Write(*(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)})))
|
|
||||||
// d.Write always returns len(s), nil.
|
|
||||||
// Ignoring the return output and returning these fixed values buys a
|
|
||||||
// savings of 6 in the inliner's cost model.
|
|
||||||
return len(s), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// sliceHeader is similar to reflect.SliceHeader, but it assumes that the layout
|
|
||||||
// of the first two words is the same as the layout of a string.
|
|
||||||
type sliceHeader struct {
|
|
||||||
s string
|
|
||||||
cap int
|
|
||||||
}
|
|
21
vendor/github.com/dustin/go-humanize/.travis.yml
generated
vendored
21
vendor/github.com/dustin/go-humanize/.travis.yml
generated
vendored
@ -1,21 +0,0 @@
|
|||||||
sudo: false
|
|
||||||
language: go
|
|
||||||
go:
|
|
||||||
- 1.3.x
|
|
||||||
- 1.5.x
|
|
||||||
- 1.6.x
|
|
||||||
- 1.7.x
|
|
||||||
- 1.8.x
|
|
||||||
- 1.9.x
|
|
||||||
- master
|
|
||||||
matrix:
|
|
||||||
allow_failures:
|
|
||||||
- go: master
|
|
||||||
fast_finish: true
|
|
||||||
install:
|
|
||||||
- # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step).
|
|
||||||
script:
|
|
||||||
- go get -t -v ./...
|
|
||||||
- diff -u <(echo -n) <(gofmt -d -s .)
|
|
||||||
- go tool vet .
|
|
||||||
- go test -v -race ./...
|
|
21
vendor/github.com/dustin/go-humanize/LICENSE
generated
vendored
21
vendor/github.com/dustin/go-humanize/LICENSE
generated
vendored
@ -1,21 +0,0 @@
|
|||||||
Copyright (c) 2005-2008 Dustin Sallings <dustin@spy.net>
|
|
||||||
|
|
||||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
||||||
of this software and associated documentation files (the "Software"), to deal
|
|
||||||
in the Software without restriction, including without limitation the rights
|
|
||||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
||||||
copies of the Software, and to permit persons to whom the Software is
|
|
||||||
furnished to do so, subject to the following conditions:
|
|
||||||
|
|
||||||
The above copyright notice and this permission notice shall be included in
|
|
||||||
all copies or substantial portions of the Software.
|
|
||||||
|
|
||||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
||||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
||||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
||||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
||||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
||||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
||||||
SOFTWARE.
|
|
||||||
|
|
||||||
<http://www.opensource.org/licenses/mit-license.php>
|
|
124
vendor/github.com/dustin/go-humanize/README.markdown
generated
vendored
124
vendor/github.com/dustin/go-humanize/README.markdown
generated
vendored
@ -1,124 +0,0 @@
|
|||||||
# Humane Units [](https://travis-ci.org/dustin/go-humanize) [](https://godoc.org/github.com/dustin/go-humanize)
|
|
||||||
|
|
||||||
Just a few functions for helping humanize times and sizes.
|
|
||||||
|
|
||||||
`go get` it as `github.com/dustin/go-humanize`, import it as
|
|
||||||
`"github.com/dustin/go-humanize"`, use it as `humanize`.
|
|
||||||
|
|
||||||
See [godoc](https://godoc.org/github.com/dustin/go-humanize) for
|
|
||||||
complete documentation.
|
|
||||||
|
|
||||||
## Sizes
|
|
||||||
|
|
||||||
This lets you take numbers like `82854982` and convert them to useful
|
|
||||||
strings like, `83 MB` or `79 MiB` (whichever you prefer).
|
|
||||||
|
|
||||||
Example:
|
|
||||||
|
|
||||||
```go
|
|
||||||
fmt.Printf("That file is %s.", humanize.Bytes(82854982)) // That file is 83 MB.
|
|
||||||
```
|
|
||||||
|
|
||||||
## Times
|
|
||||||
|
|
||||||
This lets you take a `time.Time` and spit it out in relative terms.
|
|
||||||
For example, `12 seconds ago` or `3 days from now`.
|
|
||||||
|
|
||||||
Example:
|
|
||||||
|
|
||||||
```go
|
|
||||||
fmt.Printf("This was touched %s.", humanize.Time(someTimeInstance)) // This was touched 7 hours ago.
|
|
||||||
```
|
|
||||||
|
|
||||||
Thanks to Kyle Lemons for the time implementation from an IRC
|
|
||||||
conversation one day. It's pretty neat.
|
|
||||||
|
|
||||||
## Ordinals
|
|
||||||
|
|
||||||
From a [mailing list discussion][odisc] where a user wanted to be able
|
|
||||||
to label ordinals.
|
|
||||||
|
|
||||||
0 -> 0th
|
|
||||||
1 -> 1st
|
|
||||||
2 -> 2nd
|
|
||||||
3 -> 3rd
|
|
||||||
4 -> 4th
|
|
||||||
[...]
|
|
||||||
|
|
||||||
Example:
|
|
||||||
|
|
||||||
```go
|
|
||||||
fmt.Printf("You're my %s best friend.", humanize.Ordinal(193)) // You are my 193rd best friend.
|
|
||||||
```
|
|
||||||
|
|
||||||
## Commas
|
|
||||||
|
|
||||||
Want to shove commas into numbers? Be my guest.
|
|
||||||
|
|
||||||
0 -> 0
|
|
||||||
100 -> 100
|
|
||||||
1000 -> 1,000
|
|
||||||
1000000000 -> 1,000,000,000
|
|
||||||
-100000 -> -100,000
|
|
||||||
|
|
||||||
Example:
|
|
||||||
|
|
||||||
```go
|
|
||||||
fmt.Printf("You owe $%s.\n", humanize.Comma(6582491)) // You owe $6,582,491.
|
|
||||||
```
|
|
||||||
|
|
||||||
## Ftoa
|
|
||||||
|
|
||||||
Nicer float64 formatter that removes trailing zeros.
|
|
||||||
|
|
||||||
```go
|
|
||||||
fmt.Printf("%f", 2.24) // 2.240000
|
|
||||||
fmt.Printf("%s", humanize.Ftoa(2.24)) // 2.24
|
|
||||||
fmt.Printf("%f", 2.0) // 2.000000
|
|
||||||
fmt.Printf("%s", humanize.Ftoa(2.0)) // 2
|
|
||||||
```
|
|
||||||
|
|
||||||
## SI notation
|
|
||||||
|
|
||||||
Format numbers with [SI notation][sinotation].
|
|
||||||
|
|
||||||
Example:
|
|
||||||
|
|
||||||
```go
|
|
||||||
humanize.SI(0.00000000223, "M") // 2.23 nM
|
|
||||||
```
|
|
||||||
|
|
||||||
## English-specific functions
|
|
||||||
|
|
||||||
The following functions are in the `humanize/english` subpackage.
|
|
||||||
|
|
||||||
### Plurals
|
|
||||||
|
|
||||||
Simple English pluralization
|
|
||||||
|
|
||||||
```go
|
|
||||||
english.PluralWord(1, "object", "") // object
|
|
||||||
english.PluralWord(42, "object", "") // objects
|
|
||||||
english.PluralWord(2, "bus", "") // buses
|
|
||||||
english.PluralWord(99, "locus", "loci") // loci
|
|
||||||
|
|
||||||
english.Plural(1, "object", "") // 1 object
|
|
||||||
english.Plural(42, "object", "") // 42 objects
|
|
||||||
english.Plural(2, "bus", "") // 2 buses
|
|
||||||
english.Plural(99, "locus", "loci") // 99 loci
|
|
||||||
```
|
|
||||||
|
|
||||||
### Word series
|
|
||||||
|
|
||||||
Format comma-separated words lists with conjuctions:
|
|
||||||
|
|
||||||
```go
|
|
||||||
english.WordSeries([]string{"foo"}, "and") // foo
|
|
||||||
english.WordSeries([]string{"foo", "bar"}, "and") // foo and bar
|
|
||||||
english.WordSeries([]string{"foo", "bar", "baz"}, "and") // foo, bar and baz
|
|
||||||
|
|
||||||
english.OxfordWordSeries([]string{"foo", "bar", "baz"}, "and") // foo, bar, and baz
|
|
||||||
```
|
|
||||||
|
|
||||||
[odisc]: https://groups.google.com/d/topic/golang-nuts/l8NhI74jl-4/discussion
|
|
||||||
[sinotation]: http://en.wikipedia.org/wiki/Metric_prefix
|
|
31
vendor/github.com/dustin/go-humanize/big.go
generated
vendored
31
vendor/github.com/dustin/go-humanize/big.go
generated
vendored
@ -1,31 +0,0 @@
|
|||||||
package humanize
|
|
||||||
|
|
||||||
import (
|
|
||||||
"math/big"
|
|
||||||
)
|
|
||||||
|
|
||||||
// order of magnitude (to a max order)
|
|
||||||
func oomm(n, b *big.Int, maxmag int) (float64, int) {
|
|
||||||
mag := 0
|
|
||||||
m := &big.Int{}
|
|
||||||
for n.Cmp(b) >= 0 {
|
|
||||||
n.DivMod(n, b, m)
|
|
||||||
mag++
|
|
||||||
if mag == maxmag && maxmag >= 0 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return float64(n.Int64()) + (float64(m.Int64()) / float64(b.Int64())), mag
|
|
||||||
}
|
|
||||||
|
|
||||||
// total order of magnitude
|
|
||||||
// (same as above, but with no upper limit)
|
|
||||||
func oom(n, b *big.Int) (float64, int) {
|
|
||||||
mag := 0
|
|
||||||
m := &big.Int{}
|
|
||||||
for n.Cmp(b) >= 0 {
|
|
||||||
n.DivMod(n, b, m)
|
|
||||||
mag++
|
|
||||||
}
|
|
||||||
return float64(n.Int64()) + (float64(m.Int64()) / float64(b.Int64())), mag
|
|
||||||
}
|
|
173
vendor/github.com/dustin/go-humanize/bigbytes.go
generated
vendored
173
vendor/github.com/dustin/go-humanize/bigbytes.go
generated
vendored
@ -1,173 +0,0 @@
|
|||||||
package humanize
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"math/big"
|
|
||||||
"strings"
|
|
||||||
"unicode"
|
|
||||||
)
|
|
||||||
|
|
||||||
// IEC (binary, base-1024) sizes as arbitrary-precision integers.
var (
	bigIECExp = big.NewInt(1024)

	// BigByte is one byte in big.Ints
	BigByte = big.NewInt(1)
	// BigKiByte is 1,024 bytes in big.Ints
	BigKiByte = (&big.Int{}).Mul(BigByte, bigIECExp)
	// BigMiByte is 1,024 k bytes in big.Ints
	BigMiByte = (&big.Int{}).Mul(BigKiByte, bigIECExp)
	// BigGiByte is 1,024 m bytes in big.Ints
	BigGiByte = (&big.Int{}).Mul(BigMiByte, bigIECExp)
	// BigTiByte is 1,024 g bytes in big.Ints
	BigTiByte = (&big.Int{}).Mul(BigGiByte, bigIECExp)
	// BigPiByte is 1,024 t bytes in big.Ints
	BigPiByte = (&big.Int{}).Mul(BigTiByte, bigIECExp)
	// BigEiByte is 1,024 p bytes in big.Ints
	BigEiByte = (&big.Int{}).Mul(BigPiByte, bigIECExp)
	// BigZiByte is 1,024 e bytes in big.Ints
	BigZiByte = (&big.Int{}).Mul(BigEiByte, bigIECExp)
	// BigYiByte is 1,024 z bytes in big.Ints
	BigYiByte = (&big.Int{}).Mul(BigZiByte, bigIECExp)
)

// SI (decimal, base-1000) sizes as arbitrary-precision integers.
var (
	bigSIExp = big.NewInt(1000)

	// BigSIByte is one SI byte in big.Ints
	BigSIByte = big.NewInt(1)
	// BigKByte is 1,000 SI bytes in big.Ints
	BigKByte = (&big.Int{}).Mul(BigSIByte, bigSIExp)
	// BigMByte is 1,000 SI k bytes in big.Ints
	BigMByte = (&big.Int{}).Mul(BigKByte, bigSIExp)
	// BigGByte is 1,000 SI m bytes in big.Ints
	BigGByte = (&big.Int{}).Mul(BigMByte, bigSIExp)
	// BigTByte is 1,000 SI g bytes in big.Ints
	BigTByte = (&big.Int{}).Mul(BigGByte, bigSIExp)
	// BigPByte is 1,000 SI t bytes in big.Ints
	BigPByte = (&big.Int{}).Mul(BigTByte, bigSIExp)
	// BigEByte is 1,000 SI p bytes in big.Ints
	BigEByte = (&big.Int{}).Mul(BigPByte, bigSIExp)
	// BigZByte is 1,000 SI e bytes in big.Ints
	BigZByte = (&big.Int{}).Mul(BigEByte, bigSIExp)
	// BigYByte is 1,000 SI z bytes in big.Ints
	BigYByte = (&big.Int{}).Mul(BigZByte, bigSIExp)
)

// bigBytesSizeTable maps lower-cased unit names (with and without the
// trailing "b") to their multipliers. Used by ParseBigBytes.
var bigBytesSizeTable = map[string]*big.Int{
	"b":   BigByte,
	"kib": BigKiByte,
	"kb":  BigKByte,
	"mib": BigMiByte,
	"mb":  BigMByte,
	"gib": BigGiByte,
	"gb":  BigGByte,
	"tib": BigTiByte,
	"tb":  BigTByte,
	"pib": BigPiByte,
	"pb":  BigPByte,
	"eib": BigEiByte,
	"eb":  BigEByte,
	"zib": BigZiByte,
	"zb":  BigZByte,
	"yib": BigYiByte,
	"yb":  BigYByte,
	// Without suffix
	"":   BigByte,
	"ki": BigKiByte,
	"k":  BigKByte,
	"mi": BigMiByte,
	"m":  BigMByte,
	"gi": BigGiByte,
	"g":  BigGByte,
	"ti": BigTiByte,
	"t":  BigTByte,
	"pi": BigPiByte,
	"p":  BigPByte,
	"ei": BigEiByte,
	"e":  BigEByte,
	"z":  BigZByte,
	"zi": BigZiByte,
	"y":  BigYByte,
	"yi": BigYiByte,
}

// ten is the threshold below which byte values are printed verbatim.
var ten = big.NewInt(10)
|
|
||||||
|
|
||||||
func humanateBigBytes(s, base *big.Int, sizes []string) string {
|
|
||||||
if s.Cmp(ten) < 0 {
|
|
||||||
return fmt.Sprintf("%d B", s)
|
|
||||||
}
|
|
||||||
c := (&big.Int{}).Set(s)
|
|
||||||
val, mag := oomm(c, base, len(sizes)-1)
|
|
||||||
suffix := sizes[mag]
|
|
||||||
f := "%.0f %s"
|
|
||||||
if val < 10 {
|
|
||||||
f = "%.1f %s"
|
|
||||||
}
|
|
||||||
|
|
||||||
return fmt.Sprintf(f, val, suffix)
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
// BigBytes produces a human readable representation of an SI size.
|
|
||||||
//
|
|
||||||
// See also: ParseBigBytes.
|
|
||||||
//
|
|
||||||
// BigBytes(82854982) -> 83 MB
|
|
||||||
func BigBytes(s *big.Int) string {
|
|
||||||
sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"}
|
|
||||||
return humanateBigBytes(s, bigSIExp, sizes)
|
|
||||||
}
|
|
||||||
|
|
||||||
// BigIBytes produces a human readable representation of an IEC size.
|
|
||||||
//
|
|
||||||
// See also: ParseBigBytes.
|
|
||||||
//
|
|
||||||
// BigIBytes(82854982) -> 79 MiB
|
|
||||||
func BigIBytes(s *big.Int) string {
|
|
||||||
sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"}
|
|
||||||
return humanateBigBytes(s, bigIECExp, sizes)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ParseBigBytes parses a string representation of bytes into the number
|
|
||||||
// of bytes it represents.
|
|
||||||
//
|
|
||||||
// See also: BigBytes, BigIBytes.
|
|
||||||
//
|
|
||||||
// ParseBigBytes("42 MB") -> 42000000, nil
|
|
||||||
// ParseBigBytes("42 mib") -> 44040192, nil
|
|
||||||
func ParseBigBytes(s string) (*big.Int, error) {
|
|
||||||
lastDigit := 0
|
|
||||||
hasComma := false
|
|
||||||
for _, r := range s {
|
|
||||||
if !(unicode.IsDigit(r) || r == '.' || r == ',') {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
if r == ',' {
|
|
||||||
hasComma = true
|
|
||||||
}
|
|
||||||
lastDigit++
|
|
||||||
}
|
|
||||||
|
|
||||||
num := s[:lastDigit]
|
|
||||||
if hasComma {
|
|
||||||
num = strings.Replace(num, ",", "", -1)
|
|
||||||
}
|
|
||||||
|
|
||||||
val := &big.Rat{}
|
|
||||||
_, err := fmt.Sscanf(num, "%f", val)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
extra := strings.ToLower(strings.TrimSpace(s[lastDigit:]))
|
|
||||||
if m, ok := bigBytesSizeTable[extra]; ok {
|
|
||||||
mv := (&big.Rat{}).SetInt(m)
|
|
||||||
val.Mul(val, mv)
|
|
||||||
rv := &big.Int{}
|
|
||||||
rv.Div(val.Num(), val.Denom())
|
|
||||||
return rv, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil, fmt.Errorf("unhandled size name: %v", extra)
|
|
||||||
}
|
|
143
vendor/github.com/dustin/go-humanize/bytes.go
generated
vendored
143
vendor/github.com/dustin/go-humanize/bytes.go
generated
vendored
@ -1,143 +0,0 @@
|
|||||||
package humanize
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"math"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"unicode"
|
|
||||||
)
|
|
||||||
|
|
||||||
// IEC Sizes (binary, base-1024).
// kibis of bits
const (
	Byte = 1 << (iota * 10)
	KiByte
	MiByte
	GiByte
	TiByte
	PiByte
	EiByte
)

// SI Sizes (decimal, base-1000).
const (
	IByte = 1
	KByte = IByte * 1000
	MByte = KByte * 1000
	GByte = MByte * 1000
	TByte = GByte * 1000
	PByte = TByte * 1000
	EByte = PByte * 1000
)

// bytesSizeTable maps lower-cased unit names (with and without the
// trailing "b") to their multipliers. Used by ParseBytes.
var bytesSizeTable = map[string]uint64{
	"b":   Byte,
	"kib": KiByte,
	"kb":  KByte,
	"mib": MiByte,
	"mb":  MByte,
	"gib": GiByte,
	"gb":  GByte,
	"tib": TiByte,
	"tb":  TByte,
	"pib": PiByte,
	"pb":  PByte,
	"eib": EiByte,
	"eb":  EByte,
	// Without suffix
	"":   Byte,
	"ki": KiByte,
	"k":  KByte,
	"mi": MiByte,
	"m":  MByte,
	"gi": GiByte,
	"g":  GByte,
	"ti": TiByte,
	"t":  TByte,
	"pi": PiByte,
	"p":  PByte,
	"ei": EiByte,
	"e":  EByte,
}
|
|
||||||
|
|
||||||
// logn returns the logarithm of n in base b.
func logn(n, b float64) float64 {
	num := math.Log(n)
	den := math.Log(b)
	return num / den
}
|
|
||||||
|
|
||||||
func humanateBytes(s uint64, base float64, sizes []string) string {
|
|
||||||
if s < 10 {
|
|
||||||
return fmt.Sprintf("%d B", s)
|
|
||||||
}
|
|
||||||
e := math.Floor(logn(float64(s), base))
|
|
||||||
suffix := sizes[int(e)]
|
|
||||||
val := math.Floor(float64(s)/math.Pow(base, e)*10+0.5) / 10
|
|
||||||
f := "%.0f %s"
|
|
||||||
if val < 10 {
|
|
||||||
f = "%.1f %s"
|
|
||||||
}
|
|
||||||
|
|
||||||
return fmt.Sprintf(f, val, suffix)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Bytes produces a human readable representation of an SI size.
|
|
||||||
//
|
|
||||||
// See also: ParseBytes.
|
|
||||||
//
|
|
||||||
// Bytes(82854982) -> 83 MB
|
|
||||||
func Bytes(s uint64) string {
|
|
||||||
sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB"}
|
|
||||||
return humanateBytes(s, 1000, sizes)
|
|
||||||
}
|
|
||||||
|
|
||||||
// IBytes produces a human readable representation of an IEC size.
|
|
||||||
//
|
|
||||||
// See also: ParseBytes.
|
|
||||||
//
|
|
||||||
// IBytes(82854982) -> 79 MiB
|
|
||||||
func IBytes(s uint64) string {
|
|
||||||
sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB"}
|
|
||||||
return humanateBytes(s, 1024, sizes)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ParseBytes parses a string representation of bytes into the number
|
|
||||||
// of bytes it represents.
|
|
||||||
//
|
|
||||||
// See Also: Bytes, IBytes.
|
|
||||||
//
|
|
||||||
// ParseBytes("42 MB") -> 42000000, nil
|
|
||||||
// ParseBytes("42 mib") -> 44040192, nil
|
|
||||||
func ParseBytes(s string) (uint64, error) {
|
|
||||||
lastDigit := 0
|
|
||||||
hasComma := false
|
|
||||||
for _, r := range s {
|
|
||||||
if !(unicode.IsDigit(r) || r == '.' || r == ',') {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
if r == ',' {
|
|
||||||
hasComma = true
|
|
||||||
}
|
|
||||||
lastDigit++
|
|
||||||
}
|
|
||||||
|
|
||||||
num := s[:lastDigit]
|
|
||||||
if hasComma {
|
|
||||||
num = strings.Replace(num, ",", "", -1)
|
|
||||||
}
|
|
||||||
|
|
||||||
f, err := strconv.ParseFloat(num, 64)
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
extra := strings.ToLower(strings.TrimSpace(s[lastDigit:]))
|
|
||||||
if m, ok := bytesSizeTable[extra]; ok {
|
|
||||||
f *= float64(m)
|
|
||||||
if f >= math.MaxUint64 {
|
|
||||||
return 0, fmt.Errorf("too large: %v", s)
|
|
||||||
}
|
|
||||||
return uint64(f), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return 0, fmt.Errorf("unhandled size name: %v", extra)
|
|
||||||
}
|
|
116
vendor/github.com/dustin/go-humanize/comma.go
generated
vendored
116
vendor/github.com/dustin/go-humanize/comma.go
generated
vendored
@ -1,116 +0,0 @@
|
|||||||
package humanize
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"math"
|
|
||||||
"math/big"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Comma produces a string form of the given number in base 10 with
// commas after every three orders of magnitude.
//
// e.g. Comma(834142) -> 834,142
func Comma(v int64) string {
	// math.MinInt64 cannot be negated into a valid int64, so it is
	// returned as a literal.
	if v == math.MinInt64 {
		return "-9,223,372,036,854,775,808"
	}

	sign := ""
	if v < 0 {
		sign = "-"
		v = -v
	}

	// An int64 has at most 7 three-digit groups.
	groups := []string{"", "", "", "", "", "", ""}
	i := len(groups) - 1

	for v > 999 {
		g := strconv.FormatInt(v%1000, 10)
		// Zero-pad interior groups to exactly three digits.
		for len(g) < 3 {
			g = "0" + g
		}
		groups[i] = g
		v /= 1000
		i--
	}
	groups[i] = strconv.Itoa(int(v))
	return sign + strings.Join(groups[i:], ",")
}
|
|
||||||
|
|
||||||
// Commaf produces a string form of the given number in base 10 with
// commas after every three orders of magnitude.
//
// e.g. Commaf(834142.32) -> 834,142.32
func Commaf(v float64) string {
	var out bytes.Buffer
	if v < 0 {
		out.WriteByte('-')
		v = -v
	}

	// Split into integer and fractional text; commas only go in the
	// integer part.
	parts := strings.Split(strconv.FormatFloat(v, 'f', -1, 64), ".")
	intPart := parts[0]

	pos := len(intPart) % 3
	if pos != 0 {
		out.WriteString(intPart[:pos])
		out.WriteByte(',')
	}
	for ; pos < len(intPart); pos += 3 {
		out.WriteString(intPart[pos : pos+3])
		out.WriteByte(',')
	}
	// Drop the trailing comma written by the loop above.
	out.Truncate(out.Len() - 1)

	if len(parts) > 1 {
		out.WriteByte('.')
		out.WriteString(parts[1])
	}
	return out.String()
}
|
|
||||||
|
|
||||||
// CommafWithDigits works like the Commaf but limits the resulting
|
|
||||||
// string to the given number of decimal places.
|
|
||||||
//
|
|
||||||
// e.g. CommafWithDigits(834142.32, 1) -> 834,142.3
|
|
||||||
func CommafWithDigits(f float64, decimals int) string {
|
|
||||||
return stripTrailingDigits(Commaf(f), decimals)
|
|
||||||
}
|
|
||||||
|
|
||||||
// BigComma produces a string form of the given big.Int in base 10
|
|
||||||
// with commas after every three orders of magnitude.
|
|
||||||
func BigComma(b *big.Int) string {
|
|
||||||
sign := ""
|
|
||||||
if b.Sign() < 0 {
|
|
||||||
sign = "-"
|
|
||||||
b.Abs(b)
|
|
||||||
}
|
|
||||||
|
|
||||||
athousand := big.NewInt(1000)
|
|
||||||
c := (&big.Int{}).Set(b)
|
|
||||||
_, m := oom(c, athousand)
|
|
||||||
parts := make([]string, m+1)
|
|
||||||
j := len(parts) - 1
|
|
||||||
|
|
||||||
mod := &big.Int{}
|
|
||||||
for b.Cmp(athousand) >= 0 {
|
|
||||||
b.DivMod(b, athousand, mod)
|
|
||||||
parts[j] = strconv.FormatInt(mod.Int64(), 10)
|
|
||||||
switch len(parts[j]) {
|
|
||||||
case 2:
|
|
||||||
parts[j] = "0" + parts[j]
|
|
||||||
case 1:
|
|
||||||
parts[j] = "00" + parts[j]
|
|
||||||
}
|
|
||||||
j--
|
|
||||||
}
|
|
||||||
parts[j] = strconv.Itoa(int(b.Int64()))
|
|
||||||
return sign + strings.Join(parts[j:], ",")
|
|
||||||
}
|
|
40
vendor/github.com/dustin/go-humanize/commaf.go
generated
vendored
40
vendor/github.com/dustin/go-humanize/commaf.go
generated
vendored
@ -1,40 +0,0 @@
|
|||||||
// +build go1.6
|
|
||||||
|
|
||||||
package humanize
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"math/big"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// BigCommaf produces a string form of the given big.Float in base 10
|
|
||||||
// with commas after every three orders of magnitude.
|
|
||||||
func BigCommaf(v *big.Float) string {
|
|
||||||
buf := &bytes.Buffer{}
|
|
||||||
if v.Sign() < 0 {
|
|
||||||
buf.Write([]byte{'-'})
|
|
||||||
v.Abs(v)
|
|
||||||
}
|
|
||||||
|
|
||||||
comma := []byte{','}
|
|
||||||
|
|
||||||
parts := strings.Split(v.Text('f', -1), ".")
|
|
||||||
pos := 0
|
|
||||||
if len(parts[0])%3 != 0 {
|
|
||||||
pos += len(parts[0]) % 3
|
|
||||||
buf.WriteString(parts[0][:pos])
|
|
||||||
buf.Write(comma)
|
|
||||||
}
|
|
||||||
for ; pos < len(parts[0]); pos += 3 {
|
|
||||||
buf.WriteString(parts[0][pos : pos+3])
|
|
||||||
buf.Write(comma)
|
|
||||||
}
|
|
||||||
buf.Truncate(buf.Len() - 1)
|
|
||||||
|
|
||||||
if len(parts) > 1 {
|
|
||||||
buf.Write([]byte{'.'})
|
|
||||||
buf.WriteString(parts[1])
|
|
||||||
}
|
|
||||||
return buf.String()
|
|
||||||
}
|
|
46
vendor/github.com/dustin/go-humanize/ftoa.go
generated
vendored
46
vendor/github.com/dustin/go-humanize/ftoa.go
generated
vendored
@ -1,46 +0,0 @@
|
|||||||
package humanize
|
|
||||||
|
|
||||||
import (
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// stripTrailingZeros removes trailing '0' characters from s, and the
// decimal point too if it becomes trailing. The first character is
// never removed.
func stripTrailingZeros(s string) string {
	end := len(s)
	for end > 1 {
		switch s[end-1] {
		case '0':
			end--
			continue
		case '.':
			end--
		}
		break
	}
	return s[:end]
}
|
|
||||||
|
|
||||||
// stripTrailingDigits truncates s after at most `digits` characters past
// the decimal point; digits <= 0 removes the point as well. Strings
// without a decimal point are returned unchanged.
func stripTrailingDigits(s string, digits int) string {
	dot := strings.IndexByte(s, '.')
	if dot < 0 {
		return s
	}
	if digits <= 0 {
		return s[:dot]
	}
	if keep := dot + 1 + digits; keep < len(s) {
		return s[:keep]
	}
	return s
}
|
|
||||||
|
|
||||||
// Ftoa converts a float to a string with no trailing zeros.
|
|
||||||
func Ftoa(num float64) string {
|
|
||||||
return stripTrailingZeros(strconv.FormatFloat(num, 'f', 6, 64))
|
|
||||||
}
|
|
||||||
|
|
||||||
// FtoaWithDigits converts a float to a string but limits the resulting string
|
|
||||||
// to the given number of decimal places, and no trailing zeros.
|
|
||||||
func FtoaWithDigits(num float64, digits int) string {
|
|
||||||
return stripTrailingZeros(stripTrailingDigits(strconv.FormatFloat(num, 'f', 6, 64), digits))
|
|
||||||
}
|
|
8
vendor/github.com/dustin/go-humanize/humanize.go
generated
vendored
8
vendor/github.com/dustin/go-humanize/humanize.go
generated
vendored
@ -1,8 +0,0 @@
|
|||||||
/*
|
|
||||||
Package humanize converts boring ugly numbers to human-friendly strings and back.
|
|
||||||
|
|
||||||
Durations can be turned into strings such as "3 days ago", numbers
|
|
||||||
representing sizes like 82854982 into useful strings like, "83 MB" or
|
|
||||||
"79 MiB" (whichever you prefer).
|
|
||||||
*/
|
|
||||||
package humanize
|
|
192
vendor/github.com/dustin/go-humanize/number.go
generated
vendored
192
vendor/github.com/dustin/go-humanize/number.go
generated
vendored
@ -1,192 +0,0 @@
|
|||||||
package humanize
|
|
||||||
|
|
||||||
/*
|
|
||||||
Slightly adapted from the source to fit go-humanize.
|
|
||||||
|
|
||||||
Author: https://github.com/gorhill
|
|
||||||
Source: https://gist.github.com/gorhill/5285193
|
|
||||||
|
|
||||||
*/
|
|
||||||
|
|
||||||
import (
|
|
||||||
"math"
|
|
||||||
"strconv"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
	// renderFloatPrecisionMultipliers[p] is 10^p, indexed by decimal
	// precision p in 0..9. Used to scale the fractional part of a float
	// into an integer for rendering.
	renderFloatPrecisionMultipliers = [...]float64{
		1,
		10,
		100,
		1000,
		10000,
		100000,
		1000000,
		10000000,
		100000000,
		1000000000,
	}

	// renderFloatPrecisionRounders[p] is half a unit in the last place at
	// precision p; it is added before truncation so FormatFloat rounds
	// half-up instead of truncating.
	renderFloatPrecisionRounders = [...]float64{
		0.5,
		0.05,
		0.005,
		0.0005,
		0.00005,
		0.000005,
		0.0000005,
		0.00000005,
		0.000000005,
		0.0000000005,
	}
)
|
|
||||||
|
|
||||||
// FormatFloat produces a formatted number as string based on the following user-specified criteria:
// * thousands separator
// * decimal separator
// * decimal precision
//
// Usage: s := RenderFloat(format, n)
// The format parameter tells how to render the number n.
//
// See examples: http://play.golang.org/p/LXc1Ddm1lJ
//
// Examples of format strings, given n = 12345.6789:
// "#,###.##" => "12,345.67"
// "#,###." => "12,345"
// "#,###" => "12345,678"
// "#\u202F###,##" => "12 345,68"
// "#.###,###### => 12.345,678900
// "" (aka default format) => 12,345.67
//
// The highest precision allowed is 9 digits after the decimal symbol.
// There is also a version for integer number, FormatInteger(),
// which is convenient for calls within template.
//
// NOTE: the function panics on a malformed format string (invalid sign
// or thousands-separator directive).
func FormatFloat(format string, n float64) string {
	// Special cases:
	//   NaN = "NaN"
	//   +Inf = "+Infinity"
	//   -Inf = "-Infinity"
	if math.IsNaN(n) {
		return "NaN"
	}
	if n > math.MaxFloat64 {
		return "Infinity"
	}
	if n < -math.MaxFloat64 {
		return "-Infinity"
	}

	// default format
	precision := 2
	decimalStr := "."
	thousandStr := ","
	positiveStr := ""
	negativeStr := "-"

	if len(format) > 0 {
		// Work on runes so multi-byte separators (e.g. \u202F) are handled.
		format := []rune(format)

		// If there is an explicit format directive,
		// then default values are these:
		precision = 9
		thousandStr = ""

		// collect indices of meaningful formatting directives
		// (anything that is not a '#' or '0' digit specifier)
		formatIndx := []int{}
		for i, char := range format {
			if char != '#' && char != '0' {
				formatIndx = append(formatIndx, i)
			}
		}

		if len(formatIndx) > 0 {
			// Directive at index 0:
			//   Must be a '+'
			//   Raise an error if not the case
			//   index: 0123456789
			//          +0.000,000
			//          +000,000.0
			//          +0000.00
			//          +0000
			if formatIndx[0] == 0 {
				if format[formatIndx[0]] != '+' {
					panic("RenderFloat(): invalid positive sign directive")
				}
				positiveStr = "+"
				formatIndx = formatIndx[1:]
			}

			// Two directives:
			//   First is thousands separator
			//   Raise an error if not followed by 3-digit
			//   0123456789
			//   0.000,000
			//   000,000.00
			if len(formatIndx) == 2 {
				if (formatIndx[1] - formatIndx[0]) != 4 {
					panic("RenderFloat(): thousands separator directive must be followed by 3 digit-specifiers")
				}
				thousandStr = string(format[formatIndx[0]])
				formatIndx = formatIndx[1:]
			}

			// One directive:
			//   Directive is decimal separator
			//   The number of digit-specifier following the separator indicates wanted precision
			//   0123456789
			//   0.00
			//   000,0000
			if len(formatIndx) == 1 {
				decimalStr = string(format[formatIndx[0]])
				precision = len(format) - formatIndx[0] - 1
			}
		}
	}

	// generate sign part; values within +/-1e-9 collapse to zero with no sign
	var signStr string
	if n >= 0.000000001 {
		signStr = positiveStr
	} else if n <= -0.000000001 {
		signStr = negativeStr
		n = -n
	} else {
		signStr = ""
		n = 0.0
	}

	// split number into integer and fractional parts, rounding half-up
	// at the requested precision
	intf, fracf := math.Modf(n + renderFloatPrecisionRounders[precision])

	// generate integer part string
	intStr := strconv.FormatInt(int64(intf), 10)

	// add thousand separator if required, working right to left
	if len(thousandStr) > 0 {
		for i := len(intStr); i > 3; {
			i -= 3
			intStr = intStr[:i] + thousandStr + intStr[i:]
		}
	}

	// no fractional part, we can leave now
	if precision == 0 {
		return signStr + intStr
	}

	// generate fractional part
	fracStr := strconv.Itoa(int(fracf * renderFloatPrecisionMultipliers[precision]))
	// may need padding with leading zeros up to the precision width
	if len(fracStr) < precision {
		fracStr = "000000000000000"[:precision-len(fracStr)] + fracStr
	}

	return signStr + intStr + decimalStr + fracStr
}
|
|
||||||
|
|
||||||
// FormatInteger produces a formatted number as string.
// See FormatFloat.
//
// The value is converted through float64, so integers beyond 2^53 may
// lose precision.
func FormatInteger(format string, n int) string {
	return FormatFloat(format, float64(n))
}
|
|
25
vendor/github.com/dustin/go-humanize/ordinals.go
generated
vendored
25
vendor/github.com/dustin/go-humanize/ordinals.go
generated
vendored
@ -1,25 +0,0 @@
|
|||||||
package humanize
|
|
||||||
|
|
||||||
import "strconv"
|
|
||||||
|
|
||||||
// Ordinal gives you the input number in a rank/ordinal format.
//
// Ordinal(3) -> 3rd
func Ordinal(x int) string {
	suffix := "th"
	// 11th, 12th, 13th are irregular; everything else follows the last digit.
	if x%100 < 11 || x%100 > 13 {
		switch x % 10 {
		case 1:
			suffix = "st"
		case 2:
			suffix = "nd"
		case 3:
			suffix = "rd"
		}
	}
	return strconv.Itoa(x) + suffix
}
|
|
123
vendor/github.com/dustin/go-humanize/si.go
generated
vendored
123
vendor/github.com/dustin/go-humanize/si.go
generated
vendored
@ -1,123 +0,0 @@
|
|||||||
package humanize
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"math"
|
|
||||||
"regexp"
|
|
||||||
"strconv"
|
|
||||||
)
|
|
||||||
|
|
||||||
// siPrefixTable maps a decimal exponent (a multiple of 3) to its SI
// prefix symbol.
var siPrefixTable = map[float64]string{
	-24: "y", // yocto
	-21: "z", // zepto
	-18: "a", // atto
	-15: "f", // femto
	-12: "p", // pico
	-9:  "n", // nano
	-6:  "µ", // micro
	-3:  "m", // milli
	0:   "",
	3:   "k", // kilo
	6:   "M", // mega
	9:   "G", // giga
	12:  "T", // tera
	15:  "P", // peta
	18:  "E", // exa
	21:  "Z", // zetta
	24:  "Y", // yotta
}

// revSIPrefixTable maps an SI prefix symbol back to its multiplier
// (10^exponent). Used by ParseSI.
var revSIPrefixTable = revfmap(siPrefixTable)
|
|
||||||
|
|
||||||
// revfmap reverses the exponent-to-prefix map, precomputing each
// prefix's power-of-ten multiplier.
func revfmap(in map[float64]string) map[string]float64 {
	out := make(map[string]float64, len(in))
	for exp, prefix := range in {
		out[prefix] = math.Pow(10, exp)
	}
	return out
}
|
|
||||||
|
|
||||||
// riParseRegex matches an SI string: a (possibly signed, possibly
// fractional) number, an optional whitespace, an optional SI prefix
// character, and the remaining unit text. Built in init from
// siPrefixTable.
var riParseRegex *regexp.Regexp

func init() {
	// Assemble a character class containing every known SI prefix.
	// Map iteration order is random, but a character class is
	// order-insensitive, so the resulting regexp is equivalent each run.
	ri := `^([\-0-9.]+)\s?([`
	for _, v := range siPrefixTable {
		ri += v
	}
	ri += `]?)(.*)`

	riParseRegex = regexp.MustCompile(ri)
}
|
|
||||||
|
|
||||||
// ComputeSI finds the most appropriate SI prefix for the given number
|
|
||||||
// and returns the prefix along with the value adjusted to be within
|
|
||||||
// that prefix.
|
|
||||||
//
|
|
||||||
// See also: SI, ParseSI.
|
|
||||||
//
|
|
||||||
// e.g. ComputeSI(2.2345e-12) -> (2.2345, "p")
|
|
||||||
func ComputeSI(input float64) (float64, string) {
|
|
||||||
if input == 0 {
|
|
||||||
return 0, ""
|
|
||||||
}
|
|
||||||
mag := math.Abs(input)
|
|
||||||
exponent := math.Floor(logn(mag, 10))
|
|
||||||
exponent = math.Floor(exponent/3) * 3
|
|
||||||
|
|
||||||
value := mag / math.Pow(10, exponent)
|
|
||||||
|
|
||||||
// Handle special case where value is exactly 1000.0
|
|
||||||
// Should return 1 M instead of 1000 k
|
|
||||||
if value == 1000.0 {
|
|
||||||
exponent += 3
|
|
||||||
value = mag / math.Pow(10, exponent)
|
|
||||||
}
|
|
||||||
|
|
||||||
value = math.Copysign(value, input)
|
|
||||||
|
|
||||||
prefix := siPrefixTable[exponent]
|
|
||||||
return value, prefix
|
|
||||||
}
|
|
||||||
|
|
||||||
// SI returns a string with default formatting.
|
|
||||||
//
|
|
||||||
// SI uses Ftoa to format float value, removing trailing zeros.
|
|
||||||
//
|
|
||||||
// See also: ComputeSI, ParseSI.
|
|
||||||
//
|
|
||||||
// e.g. SI(1000000, "B") -> 1 MB
|
|
||||||
// e.g. SI(2.2345e-12, "F") -> 2.2345 pF
|
|
||||||
func SI(input float64, unit string) string {
|
|
||||||
value, prefix := ComputeSI(input)
|
|
||||||
return Ftoa(value) + " " + prefix + unit
|
|
||||||
}
|
|
||||||
|
|
||||||
// SIWithDigits works like SI but limits the resulting string to the
|
|
||||||
// given number of decimal places.
|
|
||||||
//
|
|
||||||
// e.g. SIWithDigits(1000000, 0, "B") -> 1 MB
|
|
||||||
// e.g. SIWithDigits(2.2345e-12, 2, "F") -> 2.23 pF
|
|
||||||
func SIWithDigits(input float64, decimals int, unit string) string {
|
|
||||||
value, prefix := ComputeSI(input)
|
|
||||||
return FtoaWithDigits(value, decimals) + " " + prefix + unit
|
|
||||||
}
|
|
||||||
|
|
||||||
var errInvalid = errors.New("invalid input")
|
|
||||||
|
|
||||||
// ParseSI parses an SI string back into the number and unit.
|
|
||||||
//
|
|
||||||
// See also: SI, ComputeSI.
|
|
||||||
//
|
|
||||||
// e.g. ParseSI("2.2345 pF") -> (2.2345e-12, "F", nil)
|
|
||||||
func ParseSI(input string) (float64, string, error) {
|
|
||||||
found := riParseRegex.FindStringSubmatch(input)
|
|
||||||
if len(found) != 4 {
|
|
||||||
return 0, "", errInvalid
|
|
||||||
}
|
|
||||||
mag := revSIPrefixTable[found[2]]
|
|
||||||
unit := found[3]
|
|
||||||
|
|
||||||
base, err := strconv.ParseFloat(found[1], 64)
|
|
||||||
return base * mag, unit, err
|
|
||||||
}
|
|
117
vendor/github.com/dustin/go-humanize/times.go
generated
vendored
117
vendor/github.com/dustin/go-humanize/times.go
generated
vendored
@ -1,117 +0,0 @@
|
|||||||
package humanize
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"math"
|
|
||||||
"sort"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Seconds-based time units. Month and Year are approximations
// (30-day months, 360-day years) used only for relative formatting.
const (
	Day      = 24 * time.Hour
	Week     = 7 * Day
	Month    = 30 * Day
	Year     = 12 * Month
	LongTime = 37 * Year
)
|
|
||||||
|
|
||||||
// Time formats a time into a relative string.
|
|
||||||
//
|
|
||||||
// Time(someT) -> "3 weeks ago"
|
|
||||||
func Time(then time.Time) string {
|
|
||||||
return RelTime(then, time.Now(), "ago", "from now")
|
|
||||||
}
|
|
||||||
|
|
||||||
// A RelTimeMagnitude struct contains a relative time point at which
// the relative format of time will switch to a new format string. A
// slice of these in ascending order by their "D" field is passed to
// CustomRelTime to format durations.
//
// The Format field is a string that may contain a "%s" which will be
// replaced with the appropriate signed label (e.g. "ago" or "from
// now") and a "%d" that will be replaced by the quantity.
//
// The DivBy field is the amount of time the time difference must be
// divided by in order to display correctly.
//
// e.g. if D is 2*time.Minute and you want to display "%d minutes %s"
// DivBy should be time.Minute so whatever the duration is will be
// expressed in minutes.
type RelTimeMagnitude struct {
	D      time.Duration
	Format string
	DivBy  time.Duration
}

// defaultMagnitudes is the format table used by RelTime, ordered by
// ascending cutoff D. A DivBy of 1 means the quantity is written
// directly into the format string rather than computed.
var defaultMagnitudes = []RelTimeMagnitude{
	{time.Second, "now", time.Second},
	{2 * time.Second, "1 second %s", 1},
	{time.Minute, "%d seconds %s", time.Second},
	{2 * time.Minute, "1 minute %s", 1},
	{time.Hour, "%d minutes %s", time.Minute},
	{2 * time.Hour, "1 hour %s", 1},
	{Day, "%d hours %s", time.Hour},
	{2 * Day, "1 day %s", 1},
	{Week, "%d days %s", Day},
	{2 * Week, "1 week %s", 1},
	{Month, "%d weeks %s", Week},
	{2 * Month, "1 month %s", 1},
	{Year, "%d months %s", Month},
	{18 * Month, "1 year %s", 1},
	{2 * Year, "2 years %s", 1},
	{LongTime, "%d years %s", Year},
	{math.MaxInt64, "a long while %s", 1},
}
|
|
||||||
|
|
||||||
// RelTime formats a time into a relative string.
//
// It takes two times and two labels. In addition to the generic time
// delta string (e.g. 5 minutes), the labels are used applied so that
// the label corresponding to the smaller time is applied.
//
// RelTime(timeInPast, timeInFuture, "earlier", "later") -> "3 weeks earlier"
func RelTime(a, b time.Time, albl, blbl string) string {
	return CustomRelTime(a, b, albl, blbl, defaultMagnitudes)
}
|
|
||||||
|
|
||||||
// CustomRelTime formats a time into a relative string.
|
|
||||||
//
|
|
||||||
// It takes two times two labels and a table of relative time formats.
|
|
||||||
// In addition to the generic time delta string (e.g. 5 minutes), the
|
|
||||||
// labels are used applied so that the label corresponding to the
|
|
||||||
// smaller time is applied.
|
|
||||||
func CustomRelTime(a, b time.Time, albl, blbl string, magnitudes []RelTimeMagnitude) string {
|
|
||||||
lbl := albl
|
|
||||||
diff := b.Sub(a)
|
|
||||||
|
|
||||||
if a.After(b) {
|
|
||||||
lbl = blbl
|
|
||||||
diff = a.Sub(b)
|
|
||||||
}
|
|
||||||
|
|
||||||
n := sort.Search(len(magnitudes), func(i int) bool {
|
|
||||||
return magnitudes[i].D > diff
|
|
||||||
})
|
|
||||||
|
|
||||||
if n >= len(magnitudes) {
|
|
||||||
n = len(magnitudes) - 1
|
|
||||||
}
|
|
||||||
mag := magnitudes[n]
|
|
||||||
args := []interface{}{}
|
|
||||||
escaped := false
|
|
||||||
for _, ch := range mag.Format {
|
|
||||||
if escaped {
|
|
||||||
switch ch {
|
|
||||||
case 's':
|
|
||||||
args = append(args, lbl)
|
|
||||||
case 'd':
|
|
||||||
args = append(args, diff/mag.DivBy)
|
|
||||||
}
|
|
||||||
escaped = false
|
|
||||||
} else {
|
|
||||||
escaped = ch == '%'
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return fmt.Sprintf(mag.Format, args...)
|
|
||||||
}
|
|
19
vendor/github.com/emersion/go-sasl/.build.yml
generated
vendored
19
vendor/github.com/emersion/go-sasl/.build.yml
generated
vendored
@ -1,19 +0,0 @@
|
|||||||
image: alpine/latest
|
|
||||||
packages:
|
|
||||||
- go
|
|
||||||
# Required by codecov
|
|
||||||
- bash
|
|
||||||
- findutils
|
|
||||||
sources:
|
|
||||||
- https://github.com/emersion/go-sasl
|
|
||||||
tasks:
|
|
||||||
- build: |
|
|
||||||
cd go-sasl
|
|
||||||
go build -v ./...
|
|
||||||
- test: |
|
|
||||||
cd go-sasl
|
|
||||||
go test -coverprofile=coverage.txt -covermode=atomic ./...
|
|
||||||
- upload-coverage: |
|
|
||||||
cd go-sasl
|
|
||||||
export CODECOV_TOKEN=3f257f71-a128-4834-8f68-2b534e9f4cb1
|
|
||||||
curl -s https://codecov.io/bash | bash
|
|
24
vendor/github.com/emersion/go-sasl/.gitignore
generated
vendored
24
vendor/github.com/emersion/go-sasl/.gitignore
generated
vendored
@ -1,24 +0,0 @@
|
|||||||
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
|
||||||
*.o
|
|
||||||
*.a
|
|
||||||
*.so
|
|
||||||
|
|
||||||
# Folders
|
|
||||||
_obj
|
|
||||||
_test
|
|
||||||
|
|
||||||
# Architecture specific extensions/prefixes
|
|
||||||
*.[568vq]
|
|
||||||
[568vq].out
|
|
||||||
|
|
||||||
*.cgo1.go
|
|
||||||
*.cgo2.c
|
|
||||||
_cgo_defun.c
|
|
||||||
_cgo_gotypes.go
|
|
||||||
_cgo_export.*
|
|
||||||
|
|
||||||
_testmain.go
|
|
||||||
|
|
||||||
*.exe
|
|
||||||
*.test
|
|
||||||
*.prof
|
|
21
vendor/github.com/emersion/go-sasl/LICENSE
generated
vendored
21
vendor/github.com/emersion/go-sasl/LICENSE
generated
vendored
@ -1,21 +0,0 @@
|
|||||||
The MIT License (MIT)
|
|
||||||
|
|
||||||
Copyright (c) 2016 emersion
|
|
||||||
|
|
||||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
||||||
of this software and associated documentation files (the "Software"), to deal
|
|
||||||
in the Software without restriction, including without limitation the rights
|
|
||||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
||||||
copies of the Software, and to permit persons to whom the Software is
|
|
||||||
furnished to do so, subject to the following conditions:
|
|
||||||
|
|
||||||
The above copyright notice and this permission notice shall be included in all
|
|
||||||
copies or substantial portions of the Software.
|
|
||||||
|
|
||||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
||||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
||||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
||||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
||||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
||||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
||||||
SOFTWARE.
|
|
18
vendor/github.com/emersion/go-sasl/README.md
generated
vendored
18
vendor/github.com/emersion/go-sasl/README.md
generated
vendored
@ -1,18 +0,0 @@
|
|||||||
# go-sasl
|
|
||||||
|
|
||||||
[](https://godocs.io/github.com/emersion/go-sasl)
|
|
||||||
[](https://travis-ci.org/emersion/go-sasl)
|
|
||||||
|
|
||||||
A [SASL](https://tools.ietf.org/html/rfc4422) library written in Go.
|
|
||||||
|
|
||||||
Implemented mechanisms:
|
|
||||||
|
|
||||||
* [ANONYMOUS](https://tools.ietf.org/html/rfc4505)
|
|
||||||
* [EXTERNAL](https://tools.ietf.org/html/rfc4422#appendix-A)
|
|
||||||
* [LOGIN](https://tools.ietf.org/html/draft-murchison-sasl-login-00) (obsolete, use PLAIN instead)
|
|
||||||
* [PLAIN](https://tools.ietf.org/html/rfc4616)
|
|
||||||
* [OAUTHBEARER](https://tools.ietf.org/html/rfc7628)
|
|
||||||
|
|
||||||
## License
|
|
||||||
|
|
||||||
MIT
|
|
56
vendor/github.com/emersion/go-sasl/anonymous.go
generated
vendored
56
vendor/github.com/emersion/go-sasl/anonymous.go
generated
vendored
@ -1,56 +0,0 @@
|
|||||||
package sasl
|
|
||||||
|
|
||||||
// The ANONYMOUS mechanism name.
|
|
||||||
const Anonymous = "ANONYMOUS"
|
|
||||||
|
|
||||||
type anonymousClient struct {
|
|
||||||
Trace string
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *anonymousClient) Start() (mech string, ir []byte, err error) {
|
|
||||||
mech = Anonymous
|
|
||||||
ir = []byte(c.Trace)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *anonymousClient) Next(challenge []byte) (response []byte, err error) {
|
|
||||||
return nil, ErrUnexpectedServerChallenge
|
|
||||||
}
|
|
||||||
|
|
||||||
// A client implementation of the ANONYMOUS authentication mechanism, as
|
|
||||||
// described in RFC 4505.
|
|
||||||
func NewAnonymousClient(trace string) Client {
|
|
||||||
return &anonymousClient{trace}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get trace information from clients logging in anonymously.
|
|
||||||
type AnonymousAuthenticator func(trace string) error
|
|
||||||
|
|
||||||
type anonymousServer struct {
|
|
||||||
done bool
|
|
||||||
authenticate AnonymousAuthenticator
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *anonymousServer) Next(response []byte) (challenge []byte, done bool, err error) {
|
|
||||||
if s.done {
|
|
||||||
err = ErrUnexpectedClientResponse
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// No initial response, send an empty challenge
|
|
||||||
if response == nil {
|
|
||||||
return []byte{}, false, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
s.done = true
|
|
||||||
|
|
||||||
err = s.authenticate(string(response))
|
|
||||||
done = true
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// A server implementation of the ANONYMOUS authentication mechanism, as
|
|
||||||
// described in RFC 4505.
|
|
||||||
func NewAnonymousServer(authenticator AnonymousAuthenticator) Server {
|
|
||||||
return &anonymousServer{authenticate: authenticator}
|
|
||||||
}
|
|
67
vendor/github.com/emersion/go-sasl/external.go
generated
vendored
67
vendor/github.com/emersion/go-sasl/external.go
generated
vendored
@ -1,67 +0,0 @@
|
|||||||
package sasl
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"errors"
|
|
||||||
)
|
|
||||||
|
|
||||||
// The EXTERNAL mechanism name.
|
|
||||||
const External = "EXTERNAL"
|
|
||||||
|
|
||||||
type externalClient struct {
|
|
||||||
Identity string
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *externalClient) Start() (mech string, ir []byte, err error) {
|
|
||||||
mech = External
|
|
||||||
ir = []byte(a.Identity)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *externalClient) Next(challenge []byte) (response []byte, err error) {
|
|
||||||
return nil, ErrUnexpectedServerChallenge
|
|
||||||
}
|
|
||||||
|
|
||||||
// An implementation of the EXTERNAL authentication mechanism, as described in
|
|
||||||
// RFC 4422. Authorization identity may be left blank to indicate that the
|
|
||||||
// client is requesting to act as the identity associated with the
|
|
||||||
// authentication credentials.
|
|
||||||
func NewExternalClient(identity string) Client {
|
|
||||||
return &externalClient{identity}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ExternalAuthenticator authenticates users with the EXTERNAL mechanism. If
|
|
||||||
// the identity is left blank, it indicates that it is the same as the one used
|
|
||||||
// in the external credentials. If identity is not empty and the server doesn't
|
|
||||||
// support it, an error must be returned.
|
|
||||||
type ExternalAuthenticator func(identity string) error
|
|
||||||
|
|
||||||
type externalServer struct {
|
|
||||||
done bool
|
|
||||||
authenticate ExternalAuthenticator
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *externalServer) Next(response []byte) (challenge []byte, done bool, err error) {
|
|
||||||
if a.done {
|
|
||||||
return nil, false, ErrUnexpectedClientResponse
|
|
||||||
}
|
|
||||||
|
|
||||||
// No initial response, send an empty challenge
|
|
||||||
if response == nil {
|
|
||||||
return []byte{}, false, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
a.done = true
|
|
||||||
|
|
||||||
if bytes.Contains(response, []byte("\x00")) {
|
|
||||||
return nil, false, errors.New("identity contains a NUL character")
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil, true, a.authenticate(string(response))
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewExternalServer creates a server implementation of the EXTERNAL
|
|
||||||
// authentication mechanism, as described in RFC 4422.
|
|
||||||
func NewExternalServer(authenticator ExternalAuthenticator) Server {
|
|
||||||
return &externalServer{authenticate: authenticator}
|
|
||||||
}
|
|
89
vendor/github.com/emersion/go-sasl/login.go
generated
vendored
89
vendor/github.com/emersion/go-sasl/login.go
generated
vendored
@ -1,89 +0,0 @@
|
|||||||
package sasl
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
)
|
|
||||||
|
|
||||||
// The LOGIN mechanism name.
|
|
||||||
const Login = "LOGIN"
|
|
||||||
|
|
||||||
var expectedChallenge = []byte("Password:")
|
|
||||||
|
|
||||||
type loginClient struct {
|
|
||||||
Username string
|
|
||||||
Password string
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *loginClient) Start() (mech string, ir []byte, err error) {
|
|
||||||
mech = "LOGIN"
|
|
||||||
ir = []byte(a.Username)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *loginClient) Next(challenge []byte) (response []byte, err error) {
|
|
||||||
if bytes.Compare(challenge, expectedChallenge) != 0 {
|
|
||||||
return nil, ErrUnexpectedServerChallenge
|
|
||||||
} else {
|
|
||||||
return []byte(a.Password), nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// A client implementation of the LOGIN authentication mechanism for SMTP,
|
|
||||||
// as described in http://www.iana.org/go/draft-murchison-sasl-login
|
|
||||||
//
|
|
||||||
// It is considered obsolete, and should not be used when other mechanisms are
|
|
||||||
// available. For plaintext password authentication use PLAIN mechanism.
|
|
||||||
func NewLoginClient(username, password string) Client {
|
|
||||||
return &loginClient{username, password}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Authenticates users with an username and a password.
|
|
||||||
type LoginAuthenticator func(username, password string) error
|
|
||||||
|
|
||||||
type loginState int
|
|
||||||
|
|
||||||
const (
|
|
||||||
loginNotStarted loginState = iota
|
|
||||||
loginWaitingUsername
|
|
||||||
loginWaitingPassword
|
|
||||||
)
|
|
||||||
|
|
||||||
type loginServer struct {
|
|
||||||
state loginState
|
|
||||||
username, password string
|
|
||||||
authenticate LoginAuthenticator
|
|
||||||
}
|
|
||||||
|
|
||||||
// A server implementation of the LOGIN authentication mechanism, as described
|
|
||||||
// in https://tools.ietf.org/html/draft-murchison-sasl-login-00.
|
|
||||||
//
|
|
||||||
// LOGIN is obsolete and should only be enabled for legacy clients that cannot
|
|
||||||
// be updated to use PLAIN.
|
|
||||||
func NewLoginServer(authenticator LoginAuthenticator) Server {
|
|
||||||
return &loginServer{authenticate: authenticator}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *loginServer) Next(response []byte) (challenge []byte, done bool, err error) {
|
|
||||||
switch a.state {
|
|
||||||
case loginNotStarted:
|
|
||||||
// Check for initial response field, as per RFC4422 section 3
|
|
||||||
if response == nil {
|
|
||||||
challenge = []byte("Username:")
|
|
||||||
break
|
|
||||||
}
|
|
||||||
a.state++
|
|
||||||
fallthrough
|
|
||||||
case loginWaitingUsername:
|
|
||||||
a.username = string(response)
|
|
||||||
challenge = []byte("Password:")
|
|
||||||
case loginWaitingPassword:
|
|
||||||
a.password = string(response)
|
|
||||||
err = a.authenticate(a.username, a.password)
|
|
||||||
done = true
|
|
||||||
default:
|
|
||||||
err = ErrUnexpectedClientResponse
|
|
||||||
}
|
|
||||||
|
|
||||||
a.state++
|
|
||||||
return
|
|
||||||
}
|
|
198
vendor/github.com/emersion/go-sasl/oauthbearer.go
generated
vendored
198
vendor/github.com/emersion/go-sasl/oauthbearer.go
generated
vendored
@ -1,198 +0,0 @@
|
|||||||
package sasl
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"encoding/json"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// The OAUTHBEARER mechanism name.
const OAuthBearer = "OAUTHBEARER"

// OAuthBearerError is the JSON-structured error document a server returns
// when OAUTHBEARER authentication fails (RFC 7628).
type OAuthBearerError struct {
	Status  string `json:"status"`
	Schemes string `json:"schemes"`
	Scope   string `json:"scope"`
}

// OAuthBearerOptions carries the credentials and target endpoint for an
// OAUTHBEARER exchange.
type OAuthBearerOptions struct {
	Username string
	Token    string
	Host     string
	Port     int
}

// Error implements the error interface.
func (err *OAuthBearerError) Error() string {
	return fmt.Sprintf("OAUTHBEARER authentication error (%v)", err.Status)
}
|
|
||||||
|
|
||||||
type oauthBearerClient struct {
|
|
||||||
OAuthBearerOptions
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *oauthBearerClient) Start() (mech string, ir []byte, err error) {
|
|
||||||
var authzid string
|
|
||||||
if a.Username != "" {
|
|
||||||
authzid = "a=" + a.Username
|
|
||||||
}
|
|
||||||
str := "n," + authzid + ","
|
|
||||||
|
|
||||||
if a.Host != "" {
|
|
||||||
str += "\x01host=" + a.Host
|
|
||||||
}
|
|
||||||
|
|
||||||
if a.Port != 0 {
|
|
||||||
str += "\x01port=" + strconv.Itoa(a.Port)
|
|
||||||
}
|
|
||||||
str += "\x01auth=Bearer " + a.Token + "\x01\x01"
|
|
||||||
ir = []byte(str)
|
|
||||||
return OAuthBearer, ir, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *oauthBearerClient) Next(challenge []byte) ([]byte, error) {
|
|
||||||
authBearerErr := &OAuthBearerError{}
|
|
||||||
if err := json.Unmarshal(challenge, authBearerErr); err != nil {
|
|
||||||
return nil, err
|
|
||||||
} else {
|
|
||||||
return nil, authBearerErr
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// An implementation of the OAUTHBEARER authentication mechanism, as
|
|
||||||
// described in RFC 7628.
|
|
||||||
func NewOAuthBearerClient(opt *OAuthBearerOptions) Client {
|
|
||||||
return &oauthBearerClient{*opt}
|
|
||||||
}
|
|
||||||
|
|
||||||
type OAuthBearerAuthenticator func(opts OAuthBearerOptions) *OAuthBearerError
|
|
||||||
|
|
||||||
type oauthBearerServer struct {
|
|
||||||
done bool
|
|
||||||
failErr error
|
|
||||||
authenticate OAuthBearerAuthenticator
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *oauthBearerServer) fail(descr string) ([]byte, bool, error) {
|
|
||||||
blob, err := json.Marshal(OAuthBearerError{
|
|
||||||
Status: "invalid_request",
|
|
||||||
Schemes: "bearer",
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
panic(err) // wtf
|
|
||||||
}
|
|
||||||
a.failErr = errors.New(descr)
|
|
||||||
return blob, false, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *oauthBearerServer) Next(response []byte) (challenge []byte, done bool, err error) {
|
|
||||||
// Per RFC, we cannot just send an error, we need to return JSON-structured
|
|
||||||
// value as a challenge and then after getting dummy response from the
|
|
||||||
// client stop the exchange.
|
|
||||||
if a.failErr != nil {
|
|
||||||
// Server libraries (go-smtp, go-imap) will not call Next on
|
|
||||||
// protocol-specific SASL cancel response ('*'). However, GS2 (and
|
|
||||||
// indirectly OAUTHBEARER) defines a protocol-independent way to do so
|
|
||||||
// using 0x01.
|
|
||||||
if len(response) != 1 && response[0] != 0x01 {
|
|
||||||
return nil, true, errors.New("unexpected response")
|
|
||||||
}
|
|
||||||
return nil, true, a.failErr
|
|
||||||
}
|
|
||||||
|
|
||||||
if a.done {
|
|
||||||
err = ErrUnexpectedClientResponse
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Generate empty challenge.
|
|
||||||
if response == nil {
|
|
||||||
return []byte{}, false, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
a.done = true
|
|
||||||
|
|
||||||
// Cut n,a=username,\x01host=...\x01auth=...
|
|
||||||
// into
|
|
||||||
// n
|
|
||||||
// a=username
|
|
||||||
// \x01host=...\x01auth=...\x01\x01
|
|
||||||
parts := bytes.SplitN(response, []byte{','}, 3)
|
|
||||||
if len(parts) != 3 {
|
|
||||||
return a.fail("Invalid response")
|
|
||||||
}
|
|
||||||
flag := parts[0]
|
|
||||||
authzid := parts[1]
|
|
||||||
if !bytes.Equal(flag, []byte{'n'}) {
|
|
||||||
return a.fail("Invalid response, missing 'n' in gs2-cb-flag")
|
|
||||||
}
|
|
||||||
opts := OAuthBearerOptions{}
|
|
||||||
if len(authzid) > 0 {
|
|
||||||
if !bytes.HasPrefix(authzid, []byte("a=")) {
|
|
||||||
return a.fail("Invalid response, missing 'a=' in gs2-authzid")
|
|
||||||
}
|
|
||||||
opts.Username = string(bytes.TrimPrefix(authzid, []byte("a=")))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Cut \x01host=...\x01auth=...\x01\x01
|
|
||||||
// into
|
|
||||||
// *empty*
|
|
||||||
// host=...
|
|
||||||
// auth=...
|
|
||||||
// *empty*
|
|
||||||
//
|
|
||||||
// Note that this code does not do a lot of checks to make sure the input
|
|
||||||
// follows the exact format specified by RFC.
|
|
||||||
params := bytes.Split(parts[2], []byte{0x01})
|
|
||||||
for _, p := range params {
|
|
||||||
// Skip empty fields (one at start and end).
|
|
||||||
if len(p) == 0 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
pParts := bytes.SplitN(p, []byte{'='}, 2)
|
|
||||||
if len(pParts) != 2 {
|
|
||||||
return a.fail("Invalid response, missing '='")
|
|
||||||
}
|
|
||||||
|
|
||||||
switch string(pParts[0]) {
|
|
||||||
case "host":
|
|
||||||
opts.Host = string(pParts[1])
|
|
||||||
case "port":
|
|
||||||
port, err := strconv.ParseUint(string(pParts[1]), 10, 16)
|
|
||||||
if err != nil {
|
|
||||||
return a.fail("Invalid response, malformed 'port' value")
|
|
||||||
}
|
|
||||||
opts.Port = int(port)
|
|
||||||
case "auth":
|
|
||||||
const prefix = "bearer "
|
|
||||||
strValue := string(pParts[1])
|
|
||||||
// Token type is case-insensitive.
|
|
||||||
if !strings.HasPrefix(strings.ToLower(strValue), prefix) {
|
|
||||||
return a.fail("Unsupported token type")
|
|
||||||
}
|
|
||||||
opts.Token = strValue[len(prefix):]
|
|
||||||
default:
|
|
||||||
return a.fail("Invalid response, unknown parameter: " + string(pParts[0]))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
authzErr := a.authenticate(opts)
|
|
||||||
if authzErr != nil {
|
|
||||||
blob, err := json.Marshal(authzErr)
|
|
||||||
if err != nil {
|
|
||||||
panic(err) // wtf
|
|
||||||
}
|
|
||||||
a.failErr = authzErr
|
|
||||||
return blob, false, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil, true, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewOAuthBearerServer(auth OAuthBearerAuthenticator) Server {
|
|
||||||
return &oauthBearerServer{authenticate: auth}
|
|
||||||
}
|
|
77
vendor/github.com/emersion/go-sasl/plain.go
generated
vendored
77
vendor/github.com/emersion/go-sasl/plain.go
generated
vendored
@ -1,77 +0,0 @@
|
|||||||
package sasl
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"errors"
|
|
||||||
)
|
|
||||||
|
|
||||||
// The PLAIN mechanism name.
|
|
||||||
const Plain = "PLAIN"
|
|
||||||
|
|
||||||
type plainClient struct {
|
|
||||||
Identity string
|
|
||||||
Username string
|
|
||||||
Password string
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *plainClient) Start() (mech string, ir []byte, err error) {
|
|
||||||
mech = "PLAIN"
|
|
||||||
ir = []byte(a.Identity + "\x00" + a.Username + "\x00" + a.Password)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *plainClient) Next(challenge []byte) (response []byte, err error) {
|
|
||||||
return nil, ErrUnexpectedServerChallenge
|
|
||||||
}
|
|
||||||
|
|
||||||
// A client implementation of the PLAIN authentication mechanism, as described
|
|
||||||
// in RFC 4616. Authorization identity may be left blank to indicate that it is
|
|
||||||
// the same as the username.
|
|
||||||
func NewPlainClient(identity, username, password string) Client {
|
|
||||||
return &plainClient{identity, username, password}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Authenticates users with an identity, a username and a password. If the
|
|
||||||
// identity is left blank, it indicates that it is the same as the username.
|
|
||||||
// If identity is not empty and the server doesn't support it, an error must be
|
|
||||||
// returned.
|
|
||||||
type PlainAuthenticator func(identity, username, password string) error
|
|
||||||
|
|
||||||
type plainServer struct {
|
|
||||||
done bool
|
|
||||||
authenticate PlainAuthenticator
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *plainServer) Next(response []byte) (challenge []byte, done bool, err error) {
|
|
||||||
if a.done {
|
|
||||||
err = ErrUnexpectedClientResponse
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// No initial response, send an empty challenge
|
|
||||||
if response == nil {
|
|
||||||
return []byte{}, false, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
a.done = true
|
|
||||||
|
|
||||||
parts := bytes.Split(response, []byte("\x00"))
|
|
||||||
if len(parts) != 3 {
|
|
||||||
err = errors.New("Invalid response")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
identity := string(parts[0])
|
|
||||||
username := string(parts[1])
|
|
||||||
password := string(parts[2])
|
|
||||||
|
|
||||||
err = a.authenticate(identity, username, password)
|
|
||||||
done = true
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// A server implementation of the PLAIN authentication mechanism, as described
|
|
||||||
// in RFC 4616.
|
|
||||||
func NewPlainServer(authenticator PlainAuthenticator) Server {
|
|
||||||
return &plainServer{authenticate: authenticator}
|
|
||||||
}
|
|
45
vendor/github.com/emersion/go-sasl/sasl.go
generated
vendored
45
vendor/github.com/emersion/go-sasl/sasl.go
generated
vendored
@ -1,45 +0,0 @@
|
|||||||
// Library for Simple Authentication and Security Layer (SASL) defined in RFC 4422.
|
|
||||||
package sasl
|
|
||||||
|
|
||||||
// Note:
|
|
||||||
// Most of this code was copied, with some modifications, from net/smtp. It
|
|
||||||
// would be better if Go provided a standard package (e.g. crypto/sasl) that
|
|
||||||
// could be shared by SMTP, IMAP, and other packages.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Common SASL errors.
var (
	// ErrUnexpectedClientResponse is returned by servers that receive a
	// response after the exchange has completed.
	ErrUnexpectedClientResponse = errors.New("sasl: unexpected client response")
	// ErrUnexpectedServerChallenge is returned by clients of single-step
	// mechanisms that receive an unsolicited challenge.
	ErrUnexpectedServerChallenge = errors.New("sasl: unexpected server challenge")
)

// Client is the interface used to perform challenge-response authentication
// on the client side.
type Client interface {
	// Start begins SASL authentication with the server. It returns the
	// authentication mechanism name and "initial response" data (if required
	// by the selected mechanism). A non-nil error causes the client to abort
	// the authentication attempt.
	//
	// A nil ir value is different from a zero-length value. The nil value
	// indicates that the selected mechanism does not use an initial response,
	// while a zero-length value indicates an empty initial response, which
	// must be sent to the server.
	Start() (mech string, ir []byte, err error)

	// Next continues challenge-response authentication. A non-nil error
	// causes the client to abort the authentication attempt.
	Next(challenge []byte) (response []byte, err error)
}

// Server is the interface used to perform challenge-response authentication
// on the server side.
type Server interface {
	// Next begins or continues challenge-response authentication. If the
	// client supplies an initial response, response is non-nil.
	//
	// If the authentication is finished, done is set to true. If the
	// authentication has failed, an error is returned.
	Next(response []byte) (challenge []byte, done bool, err error)
}
|
|
3
vendor/github.com/golang/protobuf/AUTHORS
generated
vendored
3
vendor/github.com/golang/protobuf/AUTHORS
generated
vendored
@ -1,3 +0,0 @@
|
|||||||
# This source code refers to The Go Authors for copyright purposes.
|
|
||||||
# The master list of authors is in the main Go distribution,
|
|
||||||
# visible at http://tip.golang.org/AUTHORS.
|
|
3
vendor/github.com/golang/protobuf/CONTRIBUTORS
generated
vendored
3
vendor/github.com/golang/protobuf/CONTRIBUTORS
generated
vendored
@ -1,3 +0,0 @@
|
|||||||
# This source code was written by the Go contributors.
|
|
||||||
# The master list of contributors is in the main Go distribution,
|
|
||||||
# visible at http://tip.golang.org/CONTRIBUTORS.
|
|
28
vendor/github.com/golang/protobuf/LICENSE
generated
vendored
28
vendor/github.com/golang/protobuf/LICENSE
generated
vendored
@ -1,28 +0,0 @@
|
|||||||
Copyright 2010 The Go Authors. All rights reserved.
|
|
||||||
|
|
||||||
Redistribution and use in source and binary forms, with or without
|
|
||||||
modification, are permitted provided that the following conditions are
|
|
||||||
met:
|
|
||||||
|
|
||||||
* Redistributions of source code must retain the above copyright
|
|
||||||
notice, this list of conditions and the following disclaimer.
|
|
||||||
* Redistributions in binary form must reproduce the above
|
|
||||||
copyright notice, this list of conditions and the following disclaimer
|
|
||||||
in the documentation and/or other materials provided with the
|
|
||||||
distribution.
|
|
||||||
* Neither the name of Google Inc. nor the names of its
|
|
||||||
contributors may be used to endorse or promote products derived from
|
|
||||||
this software without specific prior written permission.
|
|
||||||
|
|
||||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
||||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
||||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|
||||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
||||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
||||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|
||||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
||||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
||||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
||||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
||||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
||||||
|
|
324
vendor/github.com/golang/protobuf/proto/buffer.go
generated
vendored
324
vendor/github.com/golang/protobuf/proto/buffer.go
generated
vendored
@ -1,324 +0,0 @@
|
|||||||
// Copyright 2019 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package proto
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
|
|
||||||
"google.golang.org/protobuf/encoding/prototext"
|
|
||||||
"google.golang.org/protobuf/encoding/protowire"
|
|
||||||
"google.golang.org/protobuf/runtime/protoimpl"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Wire types of the protobuf wire format, kept for backward compatibility
// with code written against the legacy proto package.
const (
	WireVarint     = 0
	WireFixed32    = 5
	WireFixed64    = 1
	WireBytes      = 2
	WireStartGroup = 3
	WireEndGroup   = 4
)
|
|
||||||
|
|
||||||
// EncodeVarint returns the varint encoded bytes of v.
|
|
||||||
func EncodeVarint(v uint64) []byte {
|
|
||||||
return protowire.AppendVarint(nil, v)
|
|
||||||
}
|
|
||||||
|
|
||||||
// SizeVarint returns the length of the varint encoded bytes of v.
|
|
||||||
// This is equal to len(EncodeVarint(v)).
|
|
||||||
func SizeVarint(v uint64) int {
|
|
||||||
return protowire.SizeVarint(v)
|
|
||||||
}
|
|
||||||
|
|
||||||
// DecodeVarint parses a varint encoded integer from b,
|
|
||||||
// returning the integer value and the length of the varint.
|
|
||||||
// It returns (0, 0) if there is a parse error.
|
|
||||||
func DecodeVarint(b []byte) (uint64, int) {
|
|
||||||
v, n := protowire.ConsumeVarint(b)
|
|
||||||
if n < 0 {
|
|
||||||
return 0, 0
|
|
||||||
}
|
|
||||||
return v, n
|
|
||||||
}
|
|
||||||
|
|
||||||
// Buffer is a buffer for encoding and decoding the protobuf wire format.
|
|
||||||
// It may be reused between invocations to reduce memory usage.
|
|
||||||
type Buffer struct {
|
|
||||||
buf []byte
|
|
||||||
idx int
|
|
||||||
deterministic bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewBuffer allocates a new Buffer initialized with buf,
|
|
||||||
// where the contents of buf are considered the unread portion of the buffer.
|
|
||||||
func NewBuffer(buf []byte) *Buffer {
|
|
||||||
return &Buffer{buf: buf}
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetDeterministic specifies whether to use deterministic serialization.
|
|
||||||
//
|
|
||||||
// Deterministic serialization guarantees that for a given binary, equal
|
|
||||||
// messages will always be serialized to the same bytes. This implies:
|
|
||||||
//
|
|
||||||
// - Repeated serialization of a message will return the same bytes.
|
|
||||||
// - Different processes of the same binary (which may be executing on
|
|
||||||
// different machines) will serialize equal messages to the same bytes.
|
|
||||||
//
|
|
||||||
// Note that the deterministic serialization is NOT canonical across
|
|
||||||
// languages. It is not guaranteed to remain stable over time. It is unstable
|
|
||||||
// across different builds with schema changes due to unknown fields.
|
|
||||||
// Users who need canonical serialization (e.g., persistent storage in a
|
|
||||||
// canonical form, fingerprinting, etc.) should define their own
|
|
||||||
// canonicalization specification and implement their own serializer rather
|
|
||||||
// than relying on this API.
|
|
||||||
//
|
|
||||||
// If deterministic serialization is requested, map entries will be sorted
|
|
||||||
// by keys in lexographical order. This is an implementation detail and
|
|
||||||
// subject to change.
|
|
||||||
func (b *Buffer) SetDeterministic(deterministic bool) {
|
|
||||||
b.deterministic = deterministic
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetBuf sets buf as the internal buffer,
|
|
||||||
// where the contents of buf are considered the unread portion of the buffer.
|
|
||||||
func (b *Buffer) SetBuf(buf []byte) {
|
|
||||||
b.buf = buf
|
|
||||||
b.idx = 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// Reset clears the internal buffer of all written and unread data.
|
|
||||||
func (b *Buffer) Reset() {
|
|
||||||
b.buf = b.buf[:0]
|
|
||||||
b.idx = 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// Bytes returns the internal buffer.
|
|
||||||
func (b *Buffer) Bytes() []byte {
|
|
||||||
return b.buf
|
|
||||||
}
|
|
||||||
|
|
||||||
// Unread returns the unread portion of the buffer.
|
|
||||||
func (b *Buffer) Unread() []byte {
|
|
||||||
return b.buf[b.idx:]
|
|
||||||
}
|
|
||||||
|
|
||||||
// Marshal appends the wire-format encoding of m to the buffer.
|
|
||||||
func (b *Buffer) Marshal(m Message) error {
|
|
||||||
var err error
|
|
||||||
b.buf, err = marshalAppend(b.buf, m, b.deterministic)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Unmarshal parses the wire-format message in the buffer and
|
|
||||||
// places the decoded results in m.
|
|
||||||
// It does not reset m before unmarshaling.
|
|
||||||
func (b *Buffer) Unmarshal(m Message) error {
|
|
||||||
err := UnmarshalMerge(b.Unread(), m)
|
|
||||||
b.idx = len(b.buf)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
type unknownFields struct{ XXX_unrecognized protoimpl.UnknownFields }
|
|
||||||
|
|
||||||
func (m *unknownFields) String() string { panic("not implemented") }
|
|
||||||
func (m *unknownFields) Reset() { panic("not implemented") }
|
|
||||||
func (m *unknownFields) ProtoMessage() { panic("not implemented") }
|
|
||||||
|
|
||||||
// DebugPrint dumps the encoded bytes of b with a header and footer including s
// to stdout. This is only intended for debugging.
func (*Buffer) DebugPrint(s string, b []byte) {
	// Wrap the raw bytes as unknown fields so prototext renders them.
	m := MessageReflect(new(unknownFields))
	m.SetUnknown(b)
	// Marshal error is deliberately ignored; this is best-effort debug output.
	b, _ = prototext.MarshalOptions{AllowPartial: true, Indent: "\t"}.Marshal(m.Interface())
	fmt.Printf("==== %s ====\n%s==== %s ====\n", s, b, s)
}
|
|
||||||
|
|
||||||
// EncodeVarint appends an unsigned varint encoding to the buffer.
// The error result is always nil; it exists for API compatibility.
func (b *Buffer) EncodeVarint(v uint64) error {
	b.buf = protowire.AppendVarint(b.buf, v)
	return nil
}

// EncodeZigzag32 appends a 32-bit zig-zag varint encoding to the buffer.
// Zig-zag maps signed values to unsigned ones so small negatives encode
// compactly: (n << 1) ^ (n >> 31) with an arithmetic right shift.
func (b *Buffer) EncodeZigzag32(v uint64) error {
	return b.EncodeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31))))
}

// EncodeZigzag64 appends a 64-bit zig-zag varint encoding to the buffer.
func (b *Buffer) EncodeZigzag64(v uint64) error {
	return b.EncodeVarint(uint64((uint64(v) << 1) ^ uint64((int64(v) >> 63))))
}
|
|
||||||
|
|
||||||
// EncodeFixed32 appends a 32-bit little-endian integer to the buffer.
|
|
||||||
func (b *Buffer) EncodeFixed32(v uint64) error {
|
|
||||||
b.buf = protowire.AppendFixed32(b.buf, uint32(v))
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// EncodeFixed64 appends a 64-bit little-endian integer to the buffer.
|
|
||||||
func (b *Buffer) EncodeFixed64(v uint64) error {
|
|
||||||
b.buf = protowire.AppendFixed64(b.buf, uint64(v))
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// EncodeRawBytes appends a length-prefixed raw bytes to the buffer.
|
|
||||||
func (b *Buffer) EncodeRawBytes(v []byte) error {
|
|
||||||
b.buf = protowire.AppendBytes(b.buf, v)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// EncodeStringBytes appends a length-prefixed raw bytes to the buffer.
|
|
||||||
// It does not validate whether v contains valid UTF-8.
|
|
||||||
func (b *Buffer) EncodeStringBytes(v string) error {
|
|
||||||
b.buf = protowire.AppendString(b.buf, v)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// EncodeMessage appends a length-prefixed encoded message to the buffer.
|
|
||||||
func (b *Buffer) EncodeMessage(m Message) error {
|
|
||||||
var err error
|
|
||||||
b.buf = protowire.AppendVarint(b.buf, uint64(Size(m)))
|
|
||||||
b.buf, err = marshalAppend(b.buf, m, b.deterministic)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// DecodeVarint consumes an encoded unsigned varint from the buffer.
|
|
||||||
func (b *Buffer) DecodeVarint() (uint64, error) {
|
|
||||||
v, n := protowire.ConsumeVarint(b.buf[b.idx:])
|
|
||||||
if n < 0 {
|
|
||||||
return 0, protowire.ParseError(n)
|
|
||||||
}
|
|
||||||
b.idx += n
|
|
||||||
return uint64(v), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// DecodeZigzag32 consumes an encoded 32-bit zig-zag varint from the buffer.
func (b *Buffer) DecodeZigzag32() (uint64, error) {
	v, err := b.DecodeVarint()
	if err != nil {
		return 0, err
	}
	// Undo zig-zag: shift right and sign-extend from the low bit.
	return uint64((uint32(v) >> 1) ^ uint32((int32(v&1)<<31)>>31)), nil
}

// DecodeZigzag64 consumes an encoded 64-bit zig-zag varint from the buffer.
func (b *Buffer) DecodeZigzag64() (uint64, error) {
	v, err := b.DecodeVarint()
	if err != nil {
		return 0, err
	}
	return uint64((uint64(v) >> 1) ^ uint64((int64(v&1)<<63)>>63)), nil
}
|
|
||||||
|
|
||||||
// DecodeFixed32 consumes a 32-bit little-endian integer from the buffer.
// The 32-bit value is widened to uint64 for API compatibility.
func (b *Buffer) DecodeFixed32() (uint64, error) {
	v, n := protowire.ConsumeFixed32(b.buf[b.idx:])
	if n < 0 {
		return 0, protowire.ParseError(n)
	}
	b.idx += n
	return uint64(v), nil
}

// DecodeFixed64 consumes a 64-bit little-endian integer from the buffer.
func (b *Buffer) DecodeFixed64() (uint64, error) {
	v, n := protowire.ConsumeFixed64(b.buf[b.idx:])
	if n < 0 {
		return 0, protowire.ParseError(n)
	}
	b.idx += n
	return uint64(v), nil
}
|
|
||||||
|
|
||||||
// DecodeRawBytes consumes a length-prefixed raw bytes from the buffer.
|
|
||||||
// If alloc is specified, it returns a copy the raw bytes
|
|
||||||
// rather than a sub-slice of the buffer.
|
|
||||||
func (b *Buffer) DecodeRawBytes(alloc bool) ([]byte, error) {
|
|
||||||
v, n := protowire.ConsumeBytes(b.buf[b.idx:])
|
|
||||||
if n < 0 {
|
|
||||||
return nil, protowire.ParseError(n)
|
|
||||||
}
|
|
||||||
b.idx += n
|
|
||||||
if alloc {
|
|
||||||
v = append([]byte(nil), v...)
|
|
||||||
}
|
|
||||||
return v, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// DecodeStringBytes consumes a length-prefixed raw bytes from the buffer.
|
|
||||||
// It does not validate whether the raw bytes contain valid UTF-8.
|
|
||||||
func (b *Buffer) DecodeStringBytes() (string, error) {
|
|
||||||
v, n := protowire.ConsumeString(b.buf[b.idx:])
|
|
||||||
if n < 0 {
|
|
||||||
return "", protowire.ParseError(n)
|
|
||||||
}
|
|
||||||
b.idx += n
|
|
||||||
return v, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// DecodeMessage consumes a length-prefixed message from the buffer.
|
|
||||||
// It does not reset m before unmarshaling.
|
|
||||||
func (b *Buffer) DecodeMessage(m Message) error {
|
|
||||||
v, err := b.DecodeRawBytes(false)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return UnmarshalMerge(v, m)
|
|
||||||
}
|
|
||||||
|
|
||||||
// DecodeGroup consumes a message group from the buffer.
|
|
||||||
// It assumes that the start group marker has already been consumed and
|
|
||||||
// consumes all bytes until (and including the end group marker).
|
|
||||||
// It does not reset m before unmarshaling.
|
|
||||||
func (b *Buffer) DecodeGroup(m Message) error {
|
|
||||||
v, n, err := consumeGroup(b.buf[b.idx:])
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
b.idx += n
|
|
||||||
return UnmarshalMerge(v, m)
|
|
||||||
}
|
|
||||||
|
|
||||||
// consumeGroup parses b until it finds an end group marker, returning
// the raw bytes of the message (excluding the end group marker) and the
// total length of the message (including the end group marker).
func consumeGroup(b []byte) ([]byte, int, error) {
	b0 := b
	depth := 1 // assume this follows a start group marker
	for {
		_, wtyp, tagLen := protowire.ConsumeTag(b)
		if tagLen < 0 {
			return nil, 0, protowire.ParseError(tagLen)
		}
		b = b[tagLen:]

		var valLen int
		switch wtyp {
		case protowire.VarintType:
			_, valLen = protowire.ConsumeVarint(b)
		case protowire.Fixed32Type:
			_, valLen = protowire.ConsumeFixed32(b)
		case protowire.Fixed64Type:
			_, valLen = protowire.ConsumeFixed64(b)
		case protowire.BytesType:
			_, valLen = protowire.ConsumeBytes(b)
		case protowire.StartGroupType:
			depth++ // nested group: its fields are consumed by this same loop
		case protowire.EndGroupType:
			depth--
		default:
			return nil, 0, errors.New("proto: cannot parse reserved wire type")
		}
		if valLen < 0 {
			return nil, 0, protowire.ParseError(valLen)
		}
		b = b[valLen:]

		// Depth zero means the matching end marker was just consumed.
		// The returned bytes exclude that marker (subtract its tagLen),
		// but the returned length includes it.
		if depth == 0 {
			return b0[:len(b0)-len(b)-tagLen], len(b0) - len(b), nil
		}
	}
}
|
|
63
vendor/github.com/golang/protobuf/proto/defaults.go
generated
vendored
63
vendor/github.com/golang/protobuf/proto/defaults.go
generated
vendored
@ -1,63 +0,0 @@
|
|||||||
// Copyright 2019 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package proto
|
|
||||||
|
|
||||||
import (
|
|
||||||
"google.golang.org/protobuf/reflect/protoreflect"
|
|
||||||
)
|
|
||||||
|
|
||||||
// SetDefaults sets unpopulated scalar fields to their default values.
|
|
||||||
// Fields within a oneof are not set even if they have a default value.
|
|
||||||
// SetDefaults is recursively called upon any populated message fields.
|
|
||||||
func SetDefaults(m Message) {
|
|
||||||
if m != nil {
|
|
||||||
setDefaults(MessageReflect(m))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// setDefaults applies default values to every unpopulated, non-oneof scalar
// field of m, then recurses into all populated message fields (singular,
// repeated, and map-valued).
func setDefaults(m protoreflect.Message) {
	fds := m.Descriptor().Fields()
	for i := 0; i < fds.Len(); i++ {
		fd := fds.Get(i)
		if !m.Has(fd) {
			if fd.HasDefault() && fd.ContainingOneof() == nil {
				v := fd.Default()
				if fd.Kind() == protoreflect.BytesKind {
					v = protoreflect.ValueOf(append([]byte(nil), v.Bytes()...)) // copy the default bytes
				}
				m.Set(fd, v)
			}
			continue
		}
	}

	m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
		switch {
		// Handle singular message.
		case fd.Cardinality() != protoreflect.Repeated:
			if fd.Message() != nil {
				setDefaults(m.Get(fd).Message())
			}
		// Handle list of messages.
		case fd.IsList():
			if fd.Message() != nil {
				ls := m.Get(fd).List()
				for i := 0; i < ls.Len(); i++ {
					setDefaults(ls.Get(i).Message())
				}
			}
		// Handle map of messages.
		case fd.IsMap():
			if fd.MapValue().Message() != nil {
				ms := m.Get(fd).Map()
				ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool {
					setDefaults(v.Message())
					return true
				})
			}
		}
		return true
	})
}
|
|
113
vendor/github.com/golang/protobuf/proto/deprecated.go
generated
vendored
113
vendor/github.com/golang/protobuf/proto/deprecated.go
generated
vendored
@ -1,113 +0,0 @@
|
|||||||
// Copyright 2018 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package proto
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"strconv"
|
|
||||||
|
|
||||||
protoV2 "google.golang.org/protobuf/proto"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Legacy sentinel errors, kept only so old callers that reference them
// still compile; the modern implementation never returns them.
var (
	// Deprecated: No longer returned.
	ErrNil = errors.New("proto: Marshal called with nil")

	// Deprecated: No longer returned.
	ErrTooLarge = errors.New("proto: message encodes to over 2 GB")

	// Deprecated: No longer returned.
	ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
)
|
|
||||||
|
|
||||||
// The MessageSet helpers below are retired stubs retained only for
// source compatibility with very old generated code.

// Deprecated: Do not use.
type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 }

// Deprecated: Do not use.
func GetStats() Stats { return Stats{} }

// Deprecated: Do not use.
func MarshalMessageSet(interface{}) ([]byte, error) {
	return nil, errors.New("proto: not implemented")
}

// Deprecated: Do not use.
func UnmarshalMessageSet([]byte, interface{}) error {
	return errors.New("proto: not implemented")
}

// Deprecated: Do not use.
func MarshalMessageSetJSON(interface{}) ([]byte, error) {
	return nil, errors.New("proto: not implemented")
}

// Deprecated: Do not use.
func UnmarshalMessageSetJSON([]byte, interface{}) error {
	return errors.New("proto: not implemented")
}

// Deprecated: Do not use.
func RegisterMessageSetType(Message, int32, string) {}
|
|
||||||
|
|
||||||
// EnumName returns the registered name for the enum value v, falling back
// to the decimal string representation when v has no registered name.
//
// Deprecated: Do not use.
func EnumName(m map[int32]string, v int32) string {
	if name, ok := m[v]; ok {
		return name
	}
	return strconv.Itoa(int(v))
}
|
|
||||||
|
|
||||||
// UnmarshalJSONEnum decodes a JSON-encoded enum value, accepting either the
// new string form (looked up in m) or the old integer form.
//
// Deprecated: Do not use.
func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
	if data[0] != '"' {
		// Old style: enums are ints.
		var val int32
		if err := json.Unmarshal(data, &val); err != nil {
			return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
		}
		return val, nil
	}
	// New style: enums are strings.
	var repr string
	if err := json.Unmarshal(data, &repr); err != nil {
		return -1, err
	}
	val, ok := m[repr]
	if !ok {
		return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
	}
	return val, nil
}
|
|
||||||
|
|
||||||
// Deprecated: Do not use; this type existed for internal-use only.
type InternalMessageInfo struct{}

// Deprecated: Do not use; this method existed for internal-use only.
func (*InternalMessageInfo) DiscardUnknown(m Message) {
	DiscardUnknown(m)
}

// Deprecated: Do not use; this method existed for internal-use only.
func (*InternalMessageInfo) Marshal(b []byte, m Message, deterministic bool) ([]byte, error) {
	return protoV2.MarshalOptions{Deterministic: deterministic}.MarshalAppend(b, MessageV2(m))
}

// Deprecated: Do not use; this method existed for internal-use only.
func (*InternalMessageInfo) Merge(dst, src Message) {
	protoV2.Merge(MessageV2(dst), MessageV2(src))
}

// Deprecated: Do not use; this method existed for internal-use only.
func (*InternalMessageInfo) Size(m Message) int {
	return protoV2.Size(MessageV2(m))
}

// Deprecated: Do not use; this method existed for internal-use only.
func (*InternalMessageInfo) Unmarshal(m Message, b []byte) error {
	return protoV2.UnmarshalOptions{Merge: true}.Unmarshal(b, MessageV2(m))
}
|
|
58
vendor/github.com/golang/protobuf/proto/discard.go
generated
vendored
58
vendor/github.com/golang/protobuf/proto/discard.go
generated
vendored
@ -1,58 +0,0 @@
|
|||||||
// Copyright 2019 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package proto
|
|
||||||
|
|
||||||
import (
|
|
||||||
"google.golang.org/protobuf/reflect/protoreflect"
|
|
||||||
)
|
|
||||||
|
|
||||||
// DiscardUnknown recursively discards all unknown fields from this message
|
|
||||||
// and all embedded messages.
|
|
||||||
//
|
|
||||||
// When unmarshaling a message with unrecognized fields, the tags and values
|
|
||||||
// of such fields are preserved in the Message. This allows a later call to
|
|
||||||
// marshal to be able to produce a message that continues to have those
|
|
||||||
// unrecognized fields. To avoid this, DiscardUnknown is used to
|
|
||||||
// explicitly clear the unknown fields after unmarshaling.
|
|
||||||
func DiscardUnknown(m Message) {
|
|
||||||
if m != nil {
|
|
||||||
discardUnknown(MessageReflect(m))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// discardUnknown recurses into every populated message field of m (singular,
// repeated, and map-valued), then clears m's own unknown-field buffer.
func discardUnknown(m protoreflect.Message) {
	m.Range(func(fd protoreflect.FieldDescriptor, val protoreflect.Value) bool {
		switch {
		// Handle singular message.
		case fd.Cardinality() != protoreflect.Repeated:
			if fd.Message() != nil {
				discardUnknown(m.Get(fd).Message())
			}
		// Handle list of messages.
		case fd.IsList():
			if fd.Message() != nil {
				ls := m.Get(fd).List()
				for i := 0; i < ls.Len(); i++ {
					discardUnknown(ls.Get(i).Message())
				}
			}
		// Handle map of messages.
		case fd.IsMap():
			if fd.MapValue().Message() != nil {
				ms := m.Get(fd).Map()
				ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool {
					discardUnknown(v.Message())
					return true
				})
			}
		}
		return true
	})

	// Discard unknown fields.
	if len(m.GetUnknown()) > 0 {
		m.SetUnknown(nil)
	}
}
|
|
356
vendor/github.com/golang/protobuf/proto/extensions.go
generated
vendored
356
vendor/github.com/golang/protobuf/proto/extensions.go
generated
vendored
@ -1,356 +0,0 @@
|
|||||||
// Copyright 2010 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package proto
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"reflect"
|
|
||||||
|
|
||||||
"google.golang.org/protobuf/encoding/protowire"
|
|
||||||
"google.golang.org/protobuf/proto"
|
|
||||||
"google.golang.org/protobuf/reflect/protoreflect"
|
|
||||||
"google.golang.org/protobuf/reflect/protoregistry"
|
|
||||||
"google.golang.org/protobuf/runtime/protoiface"
|
|
||||||
"google.golang.org/protobuf/runtime/protoimpl"
|
|
||||||
)
|
|
||||||
|
|
||||||
type (
	// ExtensionDesc represents an extension descriptor and
	// is used to interact with an extension field in a message.
	//
	// Variables of this type are generated in code by protoc-gen-go.
	ExtensionDesc = protoimpl.ExtensionInfo

	// ExtensionRange represents a range of message extensions.
	// Used in code generated by protoc-gen-go.
	ExtensionRange = protoiface.ExtensionRangeV1

	// Deprecated: Do not use; this is an internal type.
	Extension = protoimpl.ExtensionFieldV1

	// Deprecated: Do not use; this is an internal type.
	XXX_InternalExtensions = protoimpl.ExtensionFields
)

// ErrMissingExtension reports whether the extension was not present.
var ErrMissingExtension = errors.New("proto: missing extension")

// errNotExtendable is returned when the target message does not support
// extensions at all.
var errNotExtendable = errors.New("proto: not an extendable proto.Message")
|
|
||||||
|
|
||||||
// HasExtension reports whether the extension field is present in m
// either as an explicitly populated field or as an unknown field.
func HasExtension(m Message, xt *ExtensionDesc) (has bool) {
	mr := MessageReflect(m)
	if mr == nil || !mr.IsValid() {
		return false
	}

	// Check whether any populated known field matches the field number.
	xtd := xt.TypeDescriptor()
	if isValidExtension(mr.Descriptor(), xtd) {
		has = mr.Has(xtd)
	} else {
		// Descriptor does not validly extend this message; fall back to
		// scanning all populated fields by raw field number.
		mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
			has = int32(fd.Number()) == xt.Field
			return !has // stop ranging once a match is found
		})
	}

	// Check whether any unknown field matches the field number.
	for b := mr.GetUnknown(); !has && len(b) > 0; {
		num, _, n := protowire.ConsumeField(b)
		has = int32(num) == xt.Field
		b = b[n:]
	}
	return has
}
|
|
||||||
|
|
||||||
// ClearExtension removes the extension field from m
// either as an explicitly populated field or as an unknown field.
func ClearExtension(m Message, xt *ExtensionDesc) {
	mr := MessageReflect(m)
	if mr == nil || !mr.IsValid() {
		return
	}

	xtd := xt.TypeDescriptor()
	if isValidExtension(mr.Descriptor(), xtd) {
		mr.Clear(xtd)
	} else {
		// Descriptor does not validly extend this message; fall back to
		// clearing any populated field with the same raw number.
		mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
			if int32(fd.Number()) == xt.Field {
				mr.Clear(fd)
				return false
			}
			return true
		})
	}
	// Also drop any unknown-field bytes recorded under this number.
	clearUnknown(mr, fieldNum(xt.Field))
}
|
|
||||||
|
|
||||||
// ClearAllExtensions clears all extensions from m.
// This includes populated fields and unknown fields in the extension range.
func ClearAllExtensions(m Message) {
	mr := MessageReflect(m)
	if mr == nil || !mr.IsValid() {
		return
	}

	mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
		if fd.IsExtension() {
			mr.Clear(fd)
		}
		return true
	})
	// Drop unknown-field bytes for every number in the extension ranges.
	clearUnknown(mr, mr.Descriptor().ExtensionRanges())
}
|
|
||||||
|
|
||||||
// GetExtension retrieves a proto2 extended field from m.
//
// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil),
// then GetExtension parses the encoded field and returns a Go value of the specified type.
// If the field is not present, then the default value is returned (if one is specified),
// otherwise ErrMissingExtension is reported.
//
// If the descriptor is type incomplete (i.e., ExtensionDesc.ExtensionType is nil),
// then GetExtension returns the raw encoded bytes for the extension field.
func GetExtension(m Message, xt *ExtensionDesc) (interface{}, error) {
	mr := MessageReflect(m)
	if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 {
		return nil, errNotExtendable
	}

	// Retrieve the unknown fields for this extension field.
	var bo protoreflect.RawFields
	for bi := mr.GetUnknown(); len(bi) > 0; {
		num, _, n := protowire.ConsumeField(bi)
		if int32(num) == xt.Field {
			bo = append(bo, bi[:n]...)
		}
		bi = bi[n:]
	}

	// For type incomplete descriptors, only retrieve the unknown fields.
	if xt.ExtensionType == nil {
		return []byte(bo), nil
	}

	// If the extension field only exists as unknown fields, unmarshal it.
	// This is rarely done since proto.Unmarshal eagerly unmarshals extensions.
	xtd := xt.TypeDescriptor()
	if !isValidExtension(mr.Descriptor(), xtd) {
		return nil, fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m)
	}
	if !mr.Has(xtd) && len(bo) > 0 {
		m2 := mr.New()
		if err := (proto.UnmarshalOptions{
			Resolver: extensionResolver{xt},
		}.Unmarshal(bo, m2.Interface())); err != nil {
			return nil, err
		}
		if m2.Has(xtd) {
			// Promote the decoded value into m and drop the raw bytes.
			mr.Set(xtd, m2.Get(xtd))
			clearUnknown(mr, fieldNum(xt.Field))
		}
	}

	// Check whether the message has the extension field set or a default.
	var pv protoreflect.Value
	switch {
	case mr.Has(xtd):
		pv = mr.Get(xtd)
	case xtd.HasDefault():
		pv = xtd.Default()
	default:
		return nil, ErrMissingExtension
	}

	v := xt.InterfaceOf(pv)
	rv := reflect.ValueOf(v)
	if isScalarKind(rv.Kind()) {
		// Historical v1 behavior: scalar extensions are returned as *T.
		rv2 := reflect.New(rv.Type())
		rv2.Elem().Set(rv)
		v = rv2.Interface()
	}
	return v, nil
}
|
|
||||||
|
|
||||||
// extensionResolver is a custom extension resolver that stores a single
// extension type that takes precedence over the global registry.
type extensionResolver struct{ xt protoreflect.ExtensionType }

// FindExtensionByName returns the stored type when its full name matches,
// otherwise it defers to the global registry.
func (r extensionResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) {
	if xtd := r.xt.TypeDescriptor(); xtd.FullName() == field {
		return r.xt, nil
	}
	return protoregistry.GlobalTypes.FindExtensionByName(field)
}

// FindExtensionByNumber returns the stored type when both the containing
// message and field number match, otherwise it defers to the global registry.
func (r extensionResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) {
	if xtd := r.xt.TypeDescriptor(); xtd.ContainingMessage().FullName() == message && xtd.Number() == field {
		return r.xt, nil
	}
	return protoregistry.GlobalTypes.FindExtensionByNumber(message, field)
}
|
|
||||||
|
|
||||||
// GetExtensions returns a list of the extensions values present in m,
|
|
||||||
// corresponding with the provided list of extension descriptors, xts.
|
|
||||||
// If an extension is missing in m, the corresponding value is nil.
|
|
||||||
func GetExtensions(m Message, xts []*ExtensionDesc) ([]interface{}, error) {
|
|
||||||
mr := MessageReflect(m)
|
|
||||||
if mr == nil || !mr.IsValid() {
|
|
||||||
return nil, errNotExtendable
|
|
||||||
}
|
|
||||||
|
|
||||||
vs := make([]interface{}, len(xts))
|
|
||||||
for i, xt := range xts {
|
|
||||||
v, err := GetExtension(m, xt)
|
|
||||||
if err != nil {
|
|
||||||
if err == ErrMissingExtension {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
return vs, err
|
|
||||||
}
|
|
||||||
vs[i] = v
|
|
||||||
}
|
|
||||||
return vs, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetExtension sets an extension field in m to the provided value.
// The value must match xt.ExtensionType exactly; scalar values may be
// supplied as *T and are dereferenced to match the v2 representation.
func SetExtension(m Message, xt *ExtensionDesc, v interface{}) error {
	mr := MessageReflect(m)
	if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 {
		return errNotExtendable
	}

	rv := reflect.ValueOf(v)
	if reflect.TypeOf(v) != reflect.TypeOf(xt.ExtensionType) {
		return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", v, xt.ExtensionType)
	}
	if rv.Kind() == reflect.Ptr {
		if rv.IsNil() {
			return fmt.Errorf("proto: SetExtension called with nil value of type %T", v)
		}
		if isScalarKind(rv.Elem().Kind()) {
			v = rv.Elem().Interface()
		}
	}

	xtd := xt.TypeDescriptor()
	if !isValidExtension(mr.Descriptor(), xtd) {
		return fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m)
	}
	mr.Set(xtd, xt.ValueOf(v))
	// Drop any stale raw bytes recorded under the same field number.
	clearUnknown(mr, fieldNum(xt.Field))
	return nil
}
|
|
||||||
|
|
||||||
// SetRawExtension inserts b into the unknown fields of m.
//
// Deprecated: Use Message.ProtoReflect.SetUnknown instead.
func SetRawExtension(m Message, fnum int32, b []byte) {
	mr := MessageReflect(m)
	if mr == nil || !mr.IsValid() {
		return
	}

	// Verify that the raw field is valid.
	for b0 := b; len(b0) > 0; {
		num, _, n := protowire.ConsumeField(b0)
		if int32(num) != fnum {
			panic(fmt.Sprintf("mismatching field number: got %d, want %d", num, fnum))
		}
		b0 = b0[n:]
	}

	// Replace any existing representation of this field, then append the
	// raw bytes to the unknown-field buffer.
	ClearExtension(m, &ExtensionDesc{Field: fnum})
	mr.SetUnknown(append(mr.GetUnknown(), b...))
}
|
|
||||||
|
|
||||||
// ExtensionDescs returns a list of extension descriptors found in m,
// containing descriptors for both populated extension fields in m and
// also unknown fields of m that are in the extension range.
// For the latter case, a type-incomplete descriptor is provided where only
// the ExtensionDesc.Field field is populated.
// The order of the extension descriptors is undefined.
func ExtensionDescs(m Message) ([]*ExtensionDesc, error) {
	mr := MessageReflect(m)
	if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 {
		return nil, errNotExtendable
	}

	// Collect a set of known extension descriptors.
	extDescs := make(map[protoreflect.FieldNumber]*ExtensionDesc)
	mr.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
		if fd.IsExtension() {
			xt := fd.(protoreflect.ExtensionTypeDescriptor)
			if xd, ok := xt.Type().(*ExtensionDesc); ok {
				extDescs[fd.Number()] = xd
			}
		}
		return true
	})

	// Collect a set of unknown extension descriptors.
	// A nil entry marks a number seen only as raw bytes; the transpose loop
	// below synthesizes a field-only descriptor for it.
	extRanges := mr.Descriptor().ExtensionRanges()
	for b := mr.GetUnknown(); len(b) > 0; {
		num, _, n := protowire.ConsumeField(b)
		if extRanges.Has(num) && extDescs[num] == nil {
			extDescs[num] = nil
		}
		b = b[n:]
	}

	// Transpose the set of descriptors into a list.
	var xts []*ExtensionDesc
	for num, xt := range extDescs {
		if xt == nil {
			xt = &ExtensionDesc{Field: int32(num)}
		}
		xts = append(xts, xt)
	}
	return xts, nil
}
|
|
||||||
|
|
||||||
// isValidExtension reports whether xtd is a valid extension descriptor for md:
// it must extend md's message type and fall within md's extension ranges.
func isValidExtension(md protoreflect.MessageDescriptor, xtd protoreflect.ExtensionTypeDescriptor) bool {
	return xtd.ContainingMessage() == md && md.ExtensionRanges().Has(xtd.Number())
}
|
|
||||||
|
|
||||||
// isScalarKind reports whether k is a protobuf scalar kind (except bytes).
|
|
||||||
// This function exists for historical reasons since the representation of
|
|
||||||
// scalars differs between v1 and v2, where v1 uses *T and v2 uses T.
|
|
||||||
func isScalarKind(k reflect.Kind) bool {
|
|
||||||
switch k {
|
|
||||||
case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
|
|
||||||
return true
|
|
||||||
default:
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// clearUnknown removes unknown fields from m where remover.Has reports true.
func clearUnknown(m protoreflect.Message, remover interface {
	Has(protoreflect.FieldNumber) bool
}) {
	// Rebuild the unknown-field buffer, keeping only fields the remover
	// does not match.
	var bo protoreflect.RawFields
	for bi := m.GetUnknown(); len(bi) > 0; {
		num, _, n := protowire.ConsumeField(bi)
		if !remover.Has(num) {
			bo = append(bo, bi[:n]...)
		}
		bi = bi[n:]
	}
	// Only write back if something was actually removed.
	if bi := m.GetUnknown(); len(bi) != len(bo) {
		m.SetUnknown(bo)
	}
}
|
|
||||||
|
|
||||||
// fieldNum adapts a single field number to the Has interface expected by
// clearUnknown, matching exactly that one number.
type fieldNum protoreflect.FieldNumber

func (n1 fieldNum) Has(n2 protoreflect.FieldNumber) bool {
	return protoreflect.FieldNumber(n1) == n2
}
|
|
306
vendor/github.com/golang/protobuf/proto/properties.go
generated
vendored
306
vendor/github.com/golang/protobuf/proto/properties.go
generated
vendored
@ -1,306 +0,0 @@
|
|||||||
// Copyright 2010 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package proto
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"reflect"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
|
|
||||||
"google.golang.org/protobuf/reflect/protoreflect"
|
|
||||||
"google.golang.org/protobuf/runtime/protoimpl"
|
|
||||||
)
|
|
||||||
|
|
||||||
// StructProperties represents protocol buffer type information for a
// generated protobuf message in the open-struct API.
//
// Deprecated: Do not use.
type StructProperties struct {
	// Prop are the properties for each field.
	//
	// Fields belonging to a oneof are stored in OneofTypes instead, with a
	// single Properties representing the parent oneof held here.
	//
	// The order of Prop matches the order of fields in the Go struct.
	// Struct fields that are not related to protobufs have a "XXX_" prefix
	// in the Properties.Name and must be ignored by the user.
	Prop []*Properties

	// OneofTypes contains information about the oneof fields in this message.
	// It is keyed by the protobuf field name.
	OneofTypes map[string]*OneofProperties
}
|
|
||||||
|
|
||||||
// Properties represents the type information for a protobuf message field.
// Instances are typically populated from a `protobuf:"..."` struct tag via
// Init or Parse.
//
// Deprecated: Do not use.
type Properties struct {
	// Name is a placeholder name with little meaningful semantic value.
	// If the name has an "XXX_" prefix, the entire Properties must be ignored.
	Name string
	// OrigName is the protobuf field name or oneof name.
	OrigName string
	// JSONName is the JSON name for the protobuf field.
	JSONName string
	// Enum is a placeholder name for enums.
	// For historical reasons, this is neither the Go name for the enum,
	// nor the protobuf name for the enum.
	Enum string // Deprecated: Do not use.
	// Weak contains the full name of the weakly referenced message.
	Weak string
	// Wire is a string representation of the wire type
	// (e.g., "varint", "fixed32", "bytes").
	Wire string
	// WireType is the protobuf wire type for the field.
	WireType int
	// Tag is the protobuf field number.
	Tag int
	// Required reports whether this is a required field.
	Required bool
	// Optional reports whether this is a optional field.
	Optional bool
	// Repeated reports whether this is a repeated field.
	Repeated bool
	// Packed reports whether this is a packed repeated field of scalars.
	Packed bool
	// Proto3 reports whether this field operates under the proto3 syntax.
	Proto3 bool
	// Oneof reports whether this field belongs within a oneof.
	Oneof bool

	// Default is the default value in string form.
	Default string
	// HasDefault reports whether the field has a default value.
	HasDefault bool

	// MapKeyProp is the properties for the key field for a map field.
	MapKeyProp *Properties
	// MapValProp is the properties for the value field for a map field.
	MapValProp *Properties
}
|
|
||||||
|
|
||||||
// OneofProperties represents the type information for a protobuf oneof.
//
// Deprecated: Do not use.
type OneofProperties struct {
	// Type is a pointer to the generated wrapper type for the field value.
	// This is nil for messages that are not in the open-struct API.
	Type reflect.Type
	// Field is the index into StructProperties.Prop for the containing oneof.
	Field int
	// Prop is the properties for the field.
	Prop *Properties
}
|
|
||||||
|
|
||||||
// String formats the properties in the protobuf struct field tag style.
|
|
||||||
func (p *Properties) String() string {
|
|
||||||
s := p.Wire
|
|
||||||
s += "," + strconv.Itoa(p.Tag)
|
|
||||||
if p.Required {
|
|
||||||
s += ",req"
|
|
||||||
}
|
|
||||||
if p.Optional {
|
|
||||||
s += ",opt"
|
|
||||||
}
|
|
||||||
if p.Repeated {
|
|
||||||
s += ",rep"
|
|
||||||
}
|
|
||||||
if p.Packed {
|
|
||||||
s += ",packed"
|
|
||||||
}
|
|
||||||
s += ",name=" + p.OrigName
|
|
||||||
if p.JSONName != "" {
|
|
||||||
s += ",json=" + p.JSONName
|
|
||||||
}
|
|
||||||
if len(p.Enum) > 0 {
|
|
||||||
s += ",enum=" + p.Enum
|
|
||||||
}
|
|
||||||
if len(p.Weak) > 0 {
|
|
||||||
s += ",weak=" + p.Weak
|
|
||||||
}
|
|
||||||
if p.Proto3 {
|
|
||||||
s += ",proto3"
|
|
||||||
}
|
|
||||||
if p.Oneof {
|
|
||||||
s += ",oneof"
|
|
||||||
}
|
|
||||||
if p.HasDefault {
|
|
||||||
s += ",def=" + p.Default
|
|
||||||
}
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
// Parse populates p by parsing a string in the protobuf struct field tag style.
|
|
||||||
func (p *Properties) Parse(tag string) {
|
|
||||||
// For example: "bytes,49,opt,name=foo,def=hello!"
|
|
||||||
for len(tag) > 0 {
|
|
||||||
i := strings.IndexByte(tag, ',')
|
|
||||||
if i < 0 {
|
|
||||||
i = len(tag)
|
|
||||||
}
|
|
||||||
switch s := tag[:i]; {
|
|
||||||
case strings.HasPrefix(s, "name="):
|
|
||||||
p.OrigName = s[len("name="):]
|
|
||||||
case strings.HasPrefix(s, "json="):
|
|
||||||
p.JSONName = s[len("json="):]
|
|
||||||
case strings.HasPrefix(s, "enum="):
|
|
||||||
p.Enum = s[len("enum="):]
|
|
||||||
case strings.HasPrefix(s, "weak="):
|
|
||||||
p.Weak = s[len("weak="):]
|
|
||||||
case strings.Trim(s, "0123456789") == "":
|
|
||||||
n, _ := strconv.ParseUint(s, 10, 32)
|
|
||||||
p.Tag = int(n)
|
|
||||||
case s == "opt":
|
|
||||||
p.Optional = true
|
|
||||||
case s == "req":
|
|
||||||
p.Required = true
|
|
||||||
case s == "rep":
|
|
||||||
p.Repeated = true
|
|
||||||
case s == "varint" || s == "zigzag32" || s == "zigzag64":
|
|
||||||
p.Wire = s
|
|
||||||
p.WireType = WireVarint
|
|
||||||
case s == "fixed32":
|
|
||||||
p.Wire = s
|
|
||||||
p.WireType = WireFixed32
|
|
||||||
case s == "fixed64":
|
|
||||||
p.Wire = s
|
|
||||||
p.WireType = WireFixed64
|
|
||||||
case s == "bytes":
|
|
||||||
p.Wire = s
|
|
||||||
p.WireType = WireBytes
|
|
||||||
case s == "group":
|
|
||||||
p.Wire = s
|
|
||||||
p.WireType = WireStartGroup
|
|
||||||
case s == "packed":
|
|
||||||
p.Packed = true
|
|
||||||
case s == "proto3":
|
|
||||||
p.Proto3 = true
|
|
||||||
case s == "oneof":
|
|
||||||
p.Oneof = true
|
|
||||||
case strings.HasPrefix(s, "def="):
|
|
||||||
// The default tag is special in that everything afterwards is the
|
|
||||||
// default regardless of the presence of commas.
|
|
||||||
p.HasDefault = true
|
|
||||||
p.Default, i = tag[len("def="):], len(tag)
|
|
||||||
}
|
|
||||||
tag = strings.TrimPrefix(tag[i:], ",")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Init populates the properties from a protocol buffer struct tag.
|
|
||||||
//
|
|
||||||
// Deprecated: Do not use.
|
|
||||||
func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
|
|
||||||
p.Name = name
|
|
||||||
p.OrigName = name
|
|
||||||
if tag == "" {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
p.Parse(tag)
|
|
||||||
|
|
||||||
if typ != nil && typ.Kind() == reflect.Map {
|
|
||||||
p.MapKeyProp = new(Properties)
|
|
||||||
p.MapKeyProp.Init(nil, "Key", f.Tag.Get("protobuf_key"), nil)
|
|
||||||
p.MapValProp = new(Properties)
|
|
||||||
p.MapValProp.Init(nil, "Value", f.Tag.Get("protobuf_val"), nil)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
var propertiesCache sync.Map // map[reflect.Type]*StructProperties
|
|
||||||
|
|
||||||
// GetProperties returns the list of properties for the type represented by t,
|
|
||||||
// which must be a generated protocol buffer message in the open-struct API,
|
|
||||||
// where protobuf message fields are represented by exported Go struct fields.
|
|
||||||
//
|
|
||||||
// Deprecated: Use protobuf reflection instead.
|
|
||||||
func GetProperties(t reflect.Type) *StructProperties {
|
|
||||||
if p, ok := propertiesCache.Load(t); ok {
|
|
||||||
return p.(*StructProperties)
|
|
||||||
}
|
|
||||||
p, _ := propertiesCache.LoadOrStore(t, newProperties(t))
|
|
||||||
return p.(*StructProperties)
|
|
||||||
}
|
|
||||||
|
|
||||||
func newProperties(t reflect.Type) *StructProperties {
|
|
||||||
if t.Kind() != reflect.Struct {
|
|
||||||
panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t))
|
|
||||||
}
|
|
||||||
|
|
||||||
var hasOneof bool
|
|
||||||
prop := new(StructProperties)
|
|
||||||
|
|
||||||
// Construct a list of properties for each field in the struct.
|
|
||||||
for i := 0; i < t.NumField(); i++ {
|
|
||||||
p := new(Properties)
|
|
||||||
f := t.Field(i)
|
|
||||||
tagField := f.Tag.Get("protobuf")
|
|
||||||
p.Init(f.Type, f.Name, tagField, &f)
|
|
||||||
|
|
||||||
tagOneof := f.Tag.Get("protobuf_oneof")
|
|
||||||
if tagOneof != "" {
|
|
||||||
hasOneof = true
|
|
||||||
p.OrigName = tagOneof
|
|
||||||
}
|
|
||||||
|
|
||||||
// Rename unrelated struct fields with the "XXX_" prefix since so much
|
|
||||||
// user code simply checks for this to exclude special fields.
|
|
||||||
if tagField == "" && tagOneof == "" && !strings.HasPrefix(p.Name, "XXX_") {
|
|
||||||
p.Name = "XXX_" + p.Name
|
|
||||||
p.OrigName = "XXX_" + p.OrigName
|
|
||||||
} else if p.Weak != "" {
|
|
||||||
p.Name = p.OrigName // avoid possible "XXX_" prefix on weak field
|
|
||||||
}
|
|
||||||
|
|
||||||
prop.Prop = append(prop.Prop, p)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Construct a mapping of oneof field names to properties.
|
|
||||||
if hasOneof {
|
|
||||||
var oneofWrappers []interface{}
|
|
||||||
if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofFuncs"); ok {
|
|
||||||
oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[3].Interface().([]interface{})
|
|
||||||
}
|
|
||||||
if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofWrappers"); ok {
|
|
||||||
oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[0].Interface().([]interface{})
|
|
||||||
}
|
|
||||||
if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(protoreflect.ProtoMessage); ok {
|
|
||||||
if m, ok := m.ProtoReflect().(interface{ ProtoMessageInfo() *protoimpl.MessageInfo }); ok {
|
|
||||||
oneofWrappers = m.ProtoMessageInfo().OneofWrappers
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
prop.OneofTypes = make(map[string]*OneofProperties)
|
|
||||||
for _, wrapper := range oneofWrappers {
|
|
||||||
p := &OneofProperties{
|
|
||||||
Type: reflect.ValueOf(wrapper).Type(), // *T
|
|
||||||
Prop: new(Properties),
|
|
||||||
}
|
|
||||||
f := p.Type.Elem().Field(0)
|
|
||||||
p.Prop.Name = f.Name
|
|
||||||
p.Prop.Parse(f.Tag.Get("protobuf"))
|
|
||||||
|
|
||||||
// Determine the struct field that contains this oneof.
|
|
||||||
// Each wrapper is assignable to exactly one parent field.
|
|
||||||
var foundOneof bool
|
|
||||||
for i := 0; i < t.NumField() && !foundOneof; i++ {
|
|
||||||
if p.Type.AssignableTo(t.Field(i).Type) {
|
|
||||||
p.Field = i
|
|
||||||
foundOneof = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if !foundOneof {
|
|
||||||
panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t))
|
|
||||||
}
|
|
||||||
prop.OneofTypes[p.Prop.OrigName] = p
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return prop
|
|
||||||
}
|
|
||||||
|
|
||||||
func (sp *StructProperties) Len() int { return len(sp.Prop) }
|
|
||||||
func (sp *StructProperties) Less(i, j int) bool { return false }
|
|
||||||
func (sp *StructProperties) Swap(i, j int) { return }
|
|
167
vendor/github.com/golang/protobuf/proto/proto.go
generated
vendored
167
vendor/github.com/golang/protobuf/proto/proto.go
generated
vendored
@ -1,167 +0,0 @@
|
|||||||
// Copyright 2019 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// Package proto provides functionality for handling protocol buffer messages.
|
|
||||||
// In particular, it provides marshaling and unmarshaling between a protobuf
|
|
||||||
// message and the binary wire format.
|
|
||||||
//
|
|
||||||
// See https://developers.google.com/protocol-buffers/docs/gotutorial for
|
|
||||||
// more information.
|
|
||||||
//
|
|
||||||
// Deprecated: Use the "google.golang.org/protobuf/proto" package instead.
|
|
||||||
package proto
|
|
||||||
|
|
||||||
import (
|
|
||||||
protoV2 "google.golang.org/protobuf/proto"
|
|
||||||
"google.golang.org/protobuf/reflect/protoreflect"
|
|
||||||
"google.golang.org/protobuf/runtime/protoiface"
|
|
||||||
"google.golang.org/protobuf/runtime/protoimpl"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Version capability markers. Generated code presumably references these to
// assert a minimum supported runtime at compile time — TODO confirm against
// protoc-gen-go output.
const (
	ProtoPackageIsVersion1 = true
	ProtoPackageIsVersion2 = true
	ProtoPackageIsVersion3 = true
	ProtoPackageIsVersion4 = true
)

// GeneratedEnum is any enum type generated by protoc-gen-go
// which is a named int32 kind.
// This type exists for documentation purposes.
type GeneratedEnum interface{}

// GeneratedMessage is any message type generated by protoc-gen-go
// which is a pointer to a named struct kind.
// This type exists for documentation purposes.
type GeneratedMessage interface{}

// Message is a protocol buffer message.
//
// This is the v1 version of the message interface and is marginally better
// than an empty interface as it lacks any method to programatically interact
// with the contents of the message.
//
// A v2 message is declared in "google.golang.org/protobuf/proto".Message and
// exposes protobuf reflection as a first-class feature of the interface.
//
// To convert a v1 message to a v2 message, use the MessageV2 function.
// To convert a v2 message to a v1 message, use the MessageV1 function.
type Message = protoiface.MessageV1
|
|
||||||
|
|
||||||
// MessageV1 converts either a v1 or v2 message to a v1 message.
|
|
||||||
// It returns nil if m is nil.
|
|
||||||
func MessageV1(m GeneratedMessage) protoiface.MessageV1 {
|
|
||||||
return protoimpl.X.ProtoMessageV1Of(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
// MessageV2 converts either a v1 or v2 message to a v2 message.
|
|
||||||
// It returns nil if m is nil.
|
|
||||||
func MessageV2(m GeneratedMessage) protoV2.Message {
|
|
||||||
return protoimpl.X.ProtoMessageV2Of(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
// MessageReflect returns a reflective view for a message.
|
|
||||||
// It returns nil if m is nil.
|
|
||||||
func MessageReflect(m Message) protoreflect.Message {
|
|
||||||
return protoimpl.X.MessageOf(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Marshaler is implemented by messages that can marshal themselves.
// This interface is used by the following functions: Size, Marshal,
// Buffer.Marshal, and Buffer.EncodeMessage.
//
// Deprecated: Do not implement.
type Marshaler interface {
	// Marshal formats the encoded bytes of the message.
	// It should be deterministic and emit valid protobuf wire data.
	// The caller takes ownership of the returned buffer.
	Marshal() ([]byte, error)
}

// Unmarshaler is implemented by messages that can unmarshal themselves.
// This interface is used by the following functions: Unmarshal, UnmarshalMerge,
// Buffer.Unmarshal, Buffer.DecodeMessage, and Buffer.DecodeGroup.
//
// Deprecated: Do not implement.
type Unmarshaler interface {
	// Unmarshal parses the encoded bytes of the protobuf wire input.
	// The provided buffer is only valid for the duration of the method call.
	// It should not reset the receiver message.
	Unmarshal([]byte) error
}

// Merger is implemented by messages that can merge themselves.
// This interface is used by the following functions: Clone and Merge.
//
// Deprecated: Do not implement.
type Merger interface {
	// Merge merges the contents of src into the receiver message.
	// It clones all data structures in src such that it aliases no mutable
	// memory referenced by src.
	Merge(src Message)
}
|
|
||||||
|
|
||||||
// RequiredNotSetError is an error type returned when
// marshaling or unmarshaling a message with missing required fields.
type RequiredNotSetError struct {
	err error // underlying cause; may be nil
}

// Error returns the wrapped error's message, or a generic
// "required field not set" message when there is no underlying error.
func (e *RequiredNotSetError) Error() string {
	if e.err == nil {
		return "proto: required field not set"
	}
	return e.err.Error()
}

// RequiredNotSet identifies this error as a missing-required-field error.
func (e *RequiredNotSetError) RequiredNotSet() bool {
	return true
}
|
|
||||||
|
|
||||||
func checkRequiredNotSet(m protoV2.Message) error {
|
|
||||||
if err := protoV2.CheckInitialized(m); err != nil {
|
|
||||||
return &RequiredNotSetError{err: err}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Clone returns a deep copy of src.
|
|
||||||
func Clone(src Message) Message {
|
|
||||||
return MessageV1(protoV2.Clone(MessageV2(src)))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Merge merges src into dst, which must be messages of the same type.
|
|
||||||
//
|
|
||||||
// Populated scalar fields in src are copied to dst, while populated
|
|
||||||
// singular messages in src are merged into dst by recursively calling Merge.
|
|
||||||
// The elements of every list field in src is appended to the corresponded
|
|
||||||
// list fields in dst. The entries of every map field in src is copied into
|
|
||||||
// the corresponding map field in dst, possibly replacing existing entries.
|
|
||||||
// The unknown fields of src are appended to the unknown fields of dst.
|
|
||||||
func Merge(dst, src Message) {
|
|
||||||
protoV2.Merge(MessageV2(dst), MessageV2(src))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Equal reports whether two messages are equal.
|
|
||||||
// If two messages marshal to the same bytes under deterministic serialization,
|
|
||||||
// then Equal is guaranteed to report true.
|
|
||||||
//
|
|
||||||
// Two messages are equal if they are the same protobuf message type,
|
|
||||||
// have the same set of populated known and extension field values,
|
|
||||||
// and the same set of unknown fields values.
|
|
||||||
//
|
|
||||||
// Scalar values are compared with the equivalent of the == operator in Go,
|
|
||||||
// except bytes values which are compared using bytes.Equal and
|
|
||||||
// floating point values which specially treat NaNs as equal.
|
|
||||||
// Message values are compared by recursively calling Equal.
|
|
||||||
// Lists are equal if each element value is also equal.
|
|
||||||
// Maps are equal if they have the same set of keys, where the pair of values
|
|
||||||
// for each key is also equal.
|
|
||||||
func Equal(x, y Message) bool {
|
|
||||||
return protoV2.Equal(MessageV2(x), MessageV2(y))
|
|
||||||
}
|
|
||||||
|
|
||||||
func isMessageSet(md protoreflect.MessageDescriptor) bool {
|
|
||||||
ms, ok := md.(interface{ IsMessageSet() bool })
|
|
||||||
return ok && ms.IsMessageSet()
|
|
||||||
}
|
|
317
vendor/github.com/golang/protobuf/proto/registry.go
generated
vendored
317
vendor/github.com/golang/protobuf/proto/registry.go
generated
vendored
@ -1,317 +0,0 @@
|
|||||||
// Copyright 2019 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package proto
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"compress/gzip"
|
|
||||||
"fmt"
|
|
||||||
"io/ioutil"
|
|
||||||
"reflect"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
|
|
||||||
"google.golang.org/protobuf/reflect/protodesc"
|
|
||||||
"google.golang.org/protobuf/reflect/protoreflect"
|
|
||||||
"google.golang.org/protobuf/reflect/protoregistry"
|
|
||||||
"google.golang.org/protobuf/runtime/protoimpl"
|
|
||||||
)
|
|
||||||
|
|
||||||
// filePath is the path to the proto source file.
type filePath = string // e.g., "google/protobuf/descriptor.proto"

// fileDescGZIP is the compressed contents of the encoded FileDescriptorProto.
type fileDescGZIP = []byte

// fileCache memoizes the compressed descriptor for each registered file path.
var fileCache sync.Map // map[filePath]fileDescGZIP
|
|
||||||
|
|
||||||
// RegisterFile is called from generated code to register the compressed
|
|
||||||
// FileDescriptorProto with the file path for a proto source file.
|
|
||||||
//
|
|
||||||
// Deprecated: Use protoregistry.GlobalFiles.RegisterFile instead.
|
|
||||||
func RegisterFile(s filePath, d fileDescGZIP) {
|
|
||||||
// Decompress the descriptor.
|
|
||||||
zr, err := gzip.NewReader(bytes.NewReader(d))
|
|
||||||
if err != nil {
|
|
||||||
panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err))
|
|
||||||
}
|
|
||||||
b, err := ioutil.ReadAll(zr)
|
|
||||||
if err != nil {
|
|
||||||
panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Construct a protoreflect.FileDescriptor from the raw descriptor.
|
|
||||||
// Note that DescBuilder.Build automatically registers the constructed
|
|
||||||
// file descriptor with the v2 registry.
|
|
||||||
protoimpl.DescBuilder{RawDescriptor: b}.Build()
|
|
||||||
|
|
||||||
// Locally cache the raw descriptor form for the file.
|
|
||||||
fileCache.Store(s, d)
|
|
||||||
}
|
|
||||||
|
|
||||||
// FileDescriptor returns the compressed FileDescriptorProto given the file path
|
|
||||||
// for a proto source file. It returns nil if not found.
|
|
||||||
//
|
|
||||||
// Deprecated: Use protoregistry.GlobalFiles.FindFileByPath instead.
|
|
||||||
func FileDescriptor(s filePath) fileDescGZIP {
|
|
||||||
if v, ok := fileCache.Load(s); ok {
|
|
||||||
return v.(fileDescGZIP)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Find the descriptor in the v2 registry.
|
|
||||||
var b []byte
|
|
||||||
if fd, _ := protoregistry.GlobalFiles.FindFileByPath(s); fd != nil {
|
|
||||||
b, _ = Marshal(protodesc.ToFileDescriptorProto(fd))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Locally cache the raw descriptor form for the file.
|
|
||||||
if len(b) > 0 {
|
|
||||||
v, _ := fileCache.LoadOrStore(s, protoimpl.X.CompressGZIP(b))
|
|
||||||
return v.(fileDescGZIP)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// enumName is the name of an enum. For historical reasons, the enum name is
// neither the full Go name nor the full protobuf name of the enum.
// The name is the dot-separated combination of just the proto package that the
// enum is declared within followed by the Go type name of the generated enum.
type enumName = string // e.g., "my.proto.package.GoMessage_GoEnum"

// enumsByName maps enum values by name to their numeric counterpart.
type enumsByName = map[string]int32

// enumsByNumber maps enum values by number to their name counterpart.
type enumsByNumber = map[int32]string

// enumCache memoizes the name→number map per enumName.
var enumCache sync.Map // map[enumName]enumsByName

// numFilesCache records how many files per proto package have been scanned;
// EnumValueMap compares it against the v2 registry to detect staleness.
var numFilesCache sync.Map // map[protoreflect.FullName]int
|
|
||||||
|
|
||||||
// RegisterEnum is called from the generated code to register the mapping of
|
|
||||||
// enum value names to enum numbers for the enum identified by s.
|
|
||||||
//
|
|
||||||
// Deprecated: Use protoregistry.GlobalTypes.RegisterEnum instead.
|
|
||||||
func RegisterEnum(s enumName, _ enumsByNumber, m enumsByName) {
|
|
||||||
if _, ok := enumCache.Load(s); ok {
|
|
||||||
panic("proto: duplicate enum registered: " + s)
|
|
||||||
}
|
|
||||||
enumCache.Store(s, m)
|
|
||||||
|
|
||||||
// This does not forward registration to the v2 registry since this API
|
|
||||||
// lacks sufficient information to construct a complete v2 enum descriptor.
|
|
||||||
}
|
|
||||||
|
|
||||||
// EnumValueMap returns the mapping from enum value names to enum numbers for
|
|
||||||
// the enum of the given name. It returns nil if not found.
|
|
||||||
//
|
|
||||||
// Deprecated: Use protoregistry.GlobalTypes.FindEnumByName instead.
|
|
||||||
func EnumValueMap(s enumName) enumsByName {
|
|
||||||
if v, ok := enumCache.Load(s); ok {
|
|
||||||
return v.(enumsByName)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check whether the cache is stale. If the number of files in the current
|
|
||||||
// package differs, then it means that some enums may have been recently
|
|
||||||
// registered upstream that we do not know about.
|
|
||||||
var protoPkg protoreflect.FullName
|
|
||||||
if i := strings.LastIndexByte(s, '.'); i >= 0 {
|
|
||||||
protoPkg = protoreflect.FullName(s[:i])
|
|
||||||
}
|
|
||||||
v, _ := numFilesCache.Load(protoPkg)
|
|
||||||
numFiles, _ := v.(int)
|
|
||||||
if protoregistry.GlobalFiles.NumFilesByPackage(protoPkg) == numFiles {
|
|
||||||
return nil // cache is up-to-date; was not found earlier
|
|
||||||
}
|
|
||||||
|
|
||||||
// Update the enum cache for all enums declared in the given proto package.
|
|
||||||
numFiles = 0
|
|
||||||
protoregistry.GlobalFiles.RangeFilesByPackage(protoPkg, func(fd protoreflect.FileDescriptor) bool {
|
|
||||||
walkEnums(fd, func(ed protoreflect.EnumDescriptor) {
|
|
||||||
name := protoimpl.X.LegacyEnumName(ed)
|
|
||||||
if _, ok := enumCache.Load(name); !ok {
|
|
||||||
m := make(enumsByName)
|
|
||||||
evs := ed.Values()
|
|
||||||
for i := evs.Len() - 1; i >= 0; i-- {
|
|
||||||
ev := evs.Get(i)
|
|
||||||
m[string(ev.Name())] = int32(ev.Number())
|
|
||||||
}
|
|
||||||
enumCache.LoadOrStore(name, m)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
numFiles++
|
|
||||||
return true
|
|
||||||
})
|
|
||||||
numFilesCache.Store(protoPkg, numFiles)
|
|
||||||
|
|
||||||
// Check cache again for enum map.
|
|
||||||
if v, ok := enumCache.Load(s); ok {
|
|
||||||
return v.(enumsByName)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// walkEnums recursively walks all enums declared in d.
|
|
||||||
func walkEnums(d interface {
|
|
||||||
Enums() protoreflect.EnumDescriptors
|
|
||||||
Messages() protoreflect.MessageDescriptors
|
|
||||||
}, f func(protoreflect.EnumDescriptor)) {
|
|
||||||
eds := d.Enums()
|
|
||||||
for i := eds.Len() - 1; i >= 0; i-- {
|
|
||||||
f(eds.Get(i))
|
|
||||||
}
|
|
||||||
mds := d.Messages()
|
|
||||||
for i := mds.Len() - 1; i >= 0; i-- {
|
|
||||||
walkEnums(mds.Get(i), f)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// messageName is the full name of protobuf message.
type messageName = string

// messageTypeCache memoizes the Go reflect.Type per message name.
var messageTypeCache sync.Map // map[messageName]reflect.Type
|
|
||||||
|
|
||||||
// RegisterType is called from generated code to register the message Go type
|
|
||||||
// for a message of the given name.
|
|
||||||
//
|
|
||||||
// Deprecated: Use protoregistry.GlobalTypes.RegisterMessage instead.
|
|
||||||
func RegisterType(m Message, s messageName) {
|
|
||||||
mt := protoimpl.X.LegacyMessageTypeOf(m, protoreflect.FullName(s))
|
|
||||||
if err := protoregistry.GlobalTypes.RegisterMessage(mt); err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
messageTypeCache.Store(s, reflect.TypeOf(m))
|
|
||||||
}
|
|
||||||
|
|
||||||
// RegisterMapType is called from generated code to register the Go map type
|
|
||||||
// for a protobuf message representing a map entry.
|
|
||||||
//
|
|
||||||
// Deprecated: Do not use.
|
|
||||||
func RegisterMapType(m interface{}, s messageName) {
|
|
||||||
t := reflect.TypeOf(m)
|
|
||||||
if t.Kind() != reflect.Map {
|
|
||||||
panic(fmt.Sprintf("invalid map kind: %v", t))
|
|
||||||
}
|
|
||||||
if _, ok := messageTypeCache.Load(s); ok {
|
|
||||||
panic(fmt.Errorf("proto: duplicate proto message registered: %s", s))
|
|
||||||
}
|
|
||||||
messageTypeCache.Store(s, t)
|
|
||||||
}
|
|
||||||
|
|
||||||
// MessageType returns the message type for a named message.
|
|
||||||
// It returns nil if not found.
|
|
||||||
//
|
|
||||||
// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead.
|
|
||||||
func MessageType(s messageName) reflect.Type {
|
|
||||||
if v, ok := messageTypeCache.Load(s); ok {
|
|
||||||
return v.(reflect.Type)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Derive the message type from the v2 registry.
|
|
||||||
var t reflect.Type
|
|
||||||
if mt, _ := protoregistry.GlobalTypes.FindMessageByName(protoreflect.FullName(s)); mt != nil {
|
|
||||||
t = messageGoType(mt)
|
|
||||||
}
|
|
||||||
|
|
||||||
// If we could not get a concrete type, it is possible that it is a
|
|
||||||
// pseudo-message for a map entry.
|
|
||||||
if t == nil {
|
|
||||||
d, _ := protoregistry.GlobalFiles.FindDescriptorByName(protoreflect.FullName(s))
|
|
||||||
if md, _ := d.(protoreflect.MessageDescriptor); md != nil && md.IsMapEntry() {
|
|
||||||
kt := goTypeForField(md.Fields().ByNumber(1))
|
|
||||||
vt := goTypeForField(md.Fields().ByNumber(2))
|
|
||||||
t = reflect.MapOf(kt, vt)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Locally cache the message type for the given name.
|
|
||||||
if t != nil {
|
|
||||||
v, _ := messageTypeCache.LoadOrStore(s, t)
|
|
||||||
return v.(reflect.Type)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func goTypeForField(fd protoreflect.FieldDescriptor) reflect.Type {
|
|
||||||
switch k := fd.Kind(); k {
|
|
||||||
case protoreflect.EnumKind:
|
|
||||||
if et, _ := protoregistry.GlobalTypes.FindEnumByName(fd.Enum().FullName()); et != nil {
|
|
||||||
return enumGoType(et)
|
|
||||||
}
|
|
||||||
return reflect.TypeOf(protoreflect.EnumNumber(0))
|
|
||||||
case protoreflect.MessageKind, protoreflect.GroupKind:
|
|
||||||
if mt, _ := protoregistry.GlobalTypes.FindMessageByName(fd.Message().FullName()); mt != nil {
|
|
||||||
return messageGoType(mt)
|
|
||||||
}
|
|
||||||
return reflect.TypeOf((*protoreflect.Message)(nil)).Elem()
|
|
||||||
default:
|
|
||||||
return reflect.TypeOf(fd.Default().Interface())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func enumGoType(et protoreflect.EnumType) reflect.Type {
|
|
||||||
return reflect.TypeOf(et.New(0))
|
|
||||||
}
|
|
||||||
|
|
||||||
func messageGoType(mt protoreflect.MessageType) reflect.Type {
|
|
||||||
return reflect.TypeOf(MessageV1(mt.Zero().Interface()))
|
|
||||||
}
|
|
||||||
|
|
||||||
// MessageName returns the full protobuf name for the given message type.
|
|
||||||
//
|
|
||||||
// Deprecated: Use protoreflect.MessageDescriptor.FullName instead.
|
|
||||||
func MessageName(m Message) messageName {
|
|
||||||
if m == nil {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
if m, ok := m.(interface{ XXX_MessageName() messageName }); ok {
|
|
||||||
return m.XXX_MessageName()
|
|
||||||
}
|
|
||||||
return messageName(protoimpl.X.MessageDescriptorOf(m).FullName())
|
|
||||||
}
|
|
||||||
|
|
||||||
// RegisterExtension is called from the generated code to register
|
|
||||||
// the extension descriptor.
|
|
||||||
//
|
|
||||||
// Deprecated: Use protoregistry.GlobalTypes.RegisterExtension instead.
|
|
||||||
func RegisterExtension(d *ExtensionDesc) {
|
|
||||||
if err := protoregistry.GlobalTypes.RegisterExtension(d); err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// extensionsByNumber indexes extension descriptors by their field number.
type extensionsByNumber = map[int32]*ExtensionDesc

// extensionCache memoizes the extension map per message name.
var extensionCache sync.Map // map[messageName]extensionsByNumber
|
|
||||||
|
|
||||||
// RegisteredExtensions returns a map of the registered extensions for the
|
|
||||||
// provided protobuf message, indexed by the extension field number.
|
|
||||||
//
|
|
||||||
// Deprecated: Use protoregistry.GlobalTypes.RangeExtensionsByMessage instead.
|
|
||||||
func RegisteredExtensions(m Message) extensionsByNumber {
|
|
||||||
// Check whether the cache is stale. If the number of extensions for
|
|
||||||
// the given message differs, then it means that some extensions were
|
|
||||||
// recently registered upstream that we do not know about.
|
|
||||||
s := MessageName(m)
|
|
||||||
v, _ := extensionCache.Load(s)
|
|
||||||
xs, _ := v.(extensionsByNumber)
|
|
||||||
if protoregistry.GlobalTypes.NumExtensionsByMessage(protoreflect.FullName(s)) == len(xs) {
|
|
||||||
return xs // cache is up-to-date
|
|
||||||
}
|
|
||||||
|
|
||||||
// Cache is stale, re-compute the extensions map.
|
|
||||||
xs = make(extensionsByNumber)
|
|
||||||
protoregistry.GlobalTypes.RangeExtensionsByMessage(protoreflect.FullName(s), func(xt protoreflect.ExtensionType) bool {
|
|
||||||
if xd, ok := xt.(*ExtensionDesc); ok {
|
|
||||||
xs[int32(xt.TypeDescriptor().Number())] = xd
|
|
||||||
} else {
|
|
||||||
// TODO: This implies that the protoreflect.ExtensionType is a
|
|
||||||
// custom type not generated by protoc-gen-go. We could try and
|
|
||||||
// convert the type to an ExtensionDesc.
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
})
|
|
||||||
extensionCache.Store(s, xs)
|
|
||||||
return xs
|
|
||||||
}
|
|
801
vendor/github.com/golang/protobuf/proto/text_decode.go
generated
vendored
801
vendor/github.com/golang/protobuf/proto/text_decode.go
generated
vendored
@ -1,801 +0,0 @@
|
|||||||
// Copyright 2010 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package proto
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"reflect"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"unicode/utf8"
|
|
||||||
|
|
||||||
"google.golang.org/protobuf/encoding/prototext"
|
|
||||||
protoV2 "google.golang.org/protobuf/proto"
|
|
||||||
"google.golang.org/protobuf/reflect/protoreflect"
|
|
||||||
"google.golang.org/protobuf/reflect/protoregistry"
|
|
||||||
)
|
|
||||||
|
|
||||||
const wrapTextUnmarshalV2 = false
|
|
||||||
|
|
||||||
// ParseError is returned by UnmarshalText.
|
|
||||||
type ParseError struct {
|
|
||||||
Message string
|
|
||||||
|
|
||||||
// Deprecated: Do not use.
|
|
||||||
Line, Offset int
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *ParseError) Error() string {
|
|
||||||
if wrapTextUnmarshalV2 {
|
|
||||||
return e.Message
|
|
||||||
}
|
|
||||||
if e.Line == 1 {
|
|
||||||
return fmt.Sprintf("line 1.%d: %v", e.Offset, e.Message)
|
|
||||||
}
|
|
||||||
return fmt.Sprintf("line %d: %v", e.Line, e.Message)
|
|
||||||
}
|
|
||||||
|
|
||||||
// UnmarshalText parses a proto text formatted string into m.
|
|
||||||
func UnmarshalText(s string, m Message) error {
|
|
||||||
if u, ok := m.(encoding.TextUnmarshaler); ok {
|
|
||||||
return u.UnmarshalText([]byte(s))
|
|
||||||
}
|
|
||||||
|
|
||||||
m.Reset()
|
|
||||||
mi := MessageV2(m)
|
|
||||||
|
|
||||||
if wrapTextUnmarshalV2 {
|
|
||||||
err := prototext.UnmarshalOptions{
|
|
||||||
AllowPartial: true,
|
|
||||||
}.Unmarshal([]byte(s), mi)
|
|
||||||
if err != nil {
|
|
||||||
return &ParseError{Message: err.Error()}
|
|
||||||
}
|
|
||||||
return checkRequiredNotSet(mi)
|
|
||||||
} else {
|
|
||||||
if err := newTextParser(s).unmarshalMessage(mi.ProtoReflect(), ""); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return checkRequiredNotSet(mi)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type textParser struct {
|
|
||||||
s string // remaining input
|
|
||||||
done bool // whether the parsing is finished (success or error)
|
|
||||||
backed bool // whether back() was called
|
|
||||||
offset, line int
|
|
||||||
cur token
|
|
||||||
}
|
|
||||||
|
|
||||||
type token struct {
|
|
||||||
value string
|
|
||||||
err *ParseError
|
|
||||||
line int // line number
|
|
||||||
offset int // byte number from start of input, not start of line
|
|
||||||
unquoted string // the unquoted version of value, if it was a quoted string
|
|
||||||
}
|
|
||||||
|
|
||||||
func newTextParser(s string) *textParser {
|
|
||||||
p := new(textParser)
|
|
||||||
p.s = s
|
|
||||||
p.line = 1
|
|
||||||
p.cur.line = 1
|
|
||||||
return p
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *textParser) unmarshalMessage(m protoreflect.Message, terminator string) (err error) {
|
|
||||||
md := m.Descriptor()
|
|
||||||
fds := md.Fields()
|
|
||||||
|
|
||||||
// A struct is a sequence of "name: value", terminated by one of
|
|
||||||
// '>' or '}', or the end of the input. A name may also be
|
|
||||||
// "[extension]" or "[type/url]".
|
|
||||||
//
|
|
||||||
// The whole struct can also be an expanded Any message, like:
|
|
||||||
// [type/url] < ... struct contents ... >
|
|
||||||
seen := make(map[protoreflect.FieldNumber]bool)
|
|
||||||
for {
|
|
||||||
tok := p.next()
|
|
||||||
if tok.err != nil {
|
|
||||||
return tok.err
|
|
||||||
}
|
|
||||||
if tok.value == terminator {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
if tok.value == "[" {
|
|
||||||
if err := p.unmarshalExtensionOrAny(m, seen); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// This is a normal, non-extension field.
|
|
||||||
name := protoreflect.Name(tok.value)
|
|
||||||
fd := fds.ByName(name)
|
|
||||||
switch {
|
|
||||||
case fd == nil:
|
|
||||||
gd := fds.ByName(protoreflect.Name(strings.ToLower(string(name))))
|
|
||||||
if gd != nil && gd.Kind() == protoreflect.GroupKind && gd.Message().Name() == name {
|
|
||||||
fd = gd
|
|
||||||
}
|
|
||||||
case fd.Kind() == protoreflect.GroupKind && fd.Message().Name() != name:
|
|
||||||
fd = nil
|
|
||||||
case fd.IsWeak() && fd.Message().IsPlaceholder():
|
|
||||||
fd = nil
|
|
||||||
}
|
|
||||||
if fd == nil {
|
|
||||||
typeName := string(md.FullName())
|
|
||||||
if m, ok := m.Interface().(Message); ok {
|
|
||||||
t := reflect.TypeOf(m)
|
|
||||||
if t.Kind() == reflect.Ptr {
|
|
||||||
typeName = t.Elem().String()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return p.errorf("unknown field name %q in %v", name, typeName)
|
|
||||||
}
|
|
||||||
if od := fd.ContainingOneof(); od != nil && m.WhichOneof(od) != nil {
|
|
||||||
return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, od.Name())
|
|
||||||
}
|
|
||||||
if fd.Cardinality() != protoreflect.Repeated && seen[fd.Number()] {
|
|
||||||
return p.errorf("non-repeated field %q was repeated", fd.Name())
|
|
||||||
}
|
|
||||||
seen[fd.Number()] = true
|
|
||||||
|
|
||||||
// Consume any colon.
|
|
||||||
if err := p.checkForColon(fd); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Parse into the field.
|
|
||||||
v := m.Get(fd)
|
|
||||||
if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) {
|
|
||||||
v = m.Mutable(fd)
|
|
||||||
}
|
|
||||||
if v, err = p.unmarshalValue(v, fd); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
m.Set(fd, v)
|
|
||||||
|
|
||||||
if err := p.consumeOptionalSeparator(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *textParser) unmarshalExtensionOrAny(m protoreflect.Message, seen map[protoreflect.FieldNumber]bool) error {
|
|
||||||
name, err := p.consumeExtensionOrAnyName()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// If it contains a slash, it's an Any type URL.
|
|
||||||
if slashIdx := strings.LastIndex(name, "/"); slashIdx >= 0 {
|
|
||||||
tok := p.next()
|
|
||||||
if tok.err != nil {
|
|
||||||
return tok.err
|
|
||||||
}
|
|
||||||
// consume an optional colon
|
|
||||||
if tok.value == ":" {
|
|
||||||
tok = p.next()
|
|
||||||
if tok.err != nil {
|
|
||||||
return tok.err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
var terminator string
|
|
||||||
switch tok.value {
|
|
||||||
case "<":
|
|
||||||
terminator = ">"
|
|
||||||
case "{":
|
|
||||||
terminator = "}"
|
|
||||||
default:
|
|
||||||
return p.errorf("expected '{' or '<', found %q", tok.value)
|
|
||||||
}
|
|
||||||
|
|
||||||
mt, err := protoregistry.GlobalTypes.FindMessageByURL(name)
|
|
||||||
if err != nil {
|
|
||||||
return p.errorf("unrecognized message %q in google.protobuf.Any", name[slashIdx+len("/"):])
|
|
||||||
}
|
|
||||||
m2 := mt.New()
|
|
||||||
if err := p.unmarshalMessage(m2, terminator); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
b, err := protoV2.Marshal(m2.Interface())
|
|
||||||
if err != nil {
|
|
||||||
return p.errorf("failed to marshal message of type %q: %v", name[slashIdx+len("/"):], err)
|
|
||||||
}
|
|
||||||
|
|
||||||
urlFD := m.Descriptor().Fields().ByName("type_url")
|
|
||||||
valFD := m.Descriptor().Fields().ByName("value")
|
|
||||||
if seen[urlFD.Number()] {
|
|
||||||
return p.errorf("Any message unpacked multiple times, or %q already set", urlFD.Name())
|
|
||||||
}
|
|
||||||
if seen[valFD.Number()] {
|
|
||||||
return p.errorf("Any message unpacked multiple times, or %q already set", valFD.Name())
|
|
||||||
}
|
|
||||||
m.Set(urlFD, protoreflect.ValueOfString(name))
|
|
||||||
m.Set(valFD, protoreflect.ValueOfBytes(b))
|
|
||||||
seen[urlFD.Number()] = true
|
|
||||||
seen[valFD.Number()] = true
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
xname := protoreflect.FullName(name)
|
|
||||||
xt, _ := protoregistry.GlobalTypes.FindExtensionByName(xname)
|
|
||||||
if xt == nil && isMessageSet(m.Descriptor()) {
|
|
||||||
xt, _ = protoregistry.GlobalTypes.FindExtensionByName(xname.Append("message_set_extension"))
|
|
||||||
}
|
|
||||||
if xt == nil {
|
|
||||||
return p.errorf("unrecognized extension %q", name)
|
|
||||||
}
|
|
||||||
fd := xt.TypeDescriptor()
|
|
||||||
if fd.ContainingMessage().FullName() != m.Descriptor().FullName() {
|
|
||||||
return p.errorf("extension field %q does not extend message %q", name, m.Descriptor().FullName())
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := p.checkForColon(fd); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
v := m.Get(fd)
|
|
||||||
if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) {
|
|
||||||
v = m.Mutable(fd)
|
|
||||||
}
|
|
||||||
v, err = p.unmarshalValue(v, fd)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
m.Set(fd, v)
|
|
||||||
return p.consumeOptionalSeparator()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *textParser) unmarshalValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
|
|
||||||
tok := p.next()
|
|
||||||
if tok.err != nil {
|
|
||||||
return v, tok.err
|
|
||||||
}
|
|
||||||
if tok.value == "" {
|
|
||||||
return v, p.errorf("unexpected EOF")
|
|
||||||
}
|
|
||||||
|
|
||||||
switch {
|
|
||||||
case fd.IsList():
|
|
||||||
lv := v.List()
|
|
||||||
var err error
|
|
||||||
if tok.value == "[" {
|
|
||||||
// Repeated field with list notation, like [1,2,3].
|
|
||||||
for {
|
|
||||||
vv := lv.NewElement()
|
|
||||||
vv, err = p.unmarshalSingularValue(vv, fd)
|
|
||||||
if err != nil {
|
|
||||||
return v, err
|
|
||||||
}
|
|
||||||
lv.Append(vv)
|
|
||||||
|
|
||||||
tok := p.next()
|
|
||||||
if tok.err != nil {
|
|
||||||
return v, tok.err
|
|
||||||
}
|
|
||||||
if tok.value == "]" {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
if tok.value != "," {
|
|
||||||
return v, p.errorf("Expected ']' or ',' found %q", tok.value)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return v, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// One value of the repeated field.
|
|
||||||
p.back()
|
|
||||||
vv := lv.NewElement()
|
|
||||||
vv, err = p.unmarshalSingularValue(vv, fd)
|
|
||||||
if err != nil {
|
|
||||||
return v, err
|
|
||||||
}
|
|
||||||
lv.Append(vv)
|
|
||||||
return v, nil
|
|
||||||
case fd.IsMap():
|
|
||||||
// The map entry should be this sequence of tokens:
|
|
||||||
// < key : KEY value : VALUE >
|
|
||||||
// However, implementations may omit key or value, and technically
|
|
||||||
// we should support them in any order.
|
|
||||||
var terminator string
|
|
||||||
switch tok.value {
|
|
||||||
case "<":
|
|
||||||
terminator = ">"
|
|
||||||
case "{":
|
|
||||||
terminator = "}"
|
|
||||||
default:
|
|
||||||
return v, p.errorf("expected '{' or '<', found %q", tok.value)
|
|
||||||
}
|
|
||||||
|
|
||||||
keyFD := fd.MapKey()
|
|
||||||
valFD := fd.MapValue()
|
|
||||||
|
|
||||||
mv := v.Map()
|
|
||||||
kv := keyFD.Default()
|
|
||||||
vv := mv.NewValue()
|
|
||||||
for {
|
|
||||||
tok := p.next()
|
|
||||||
if tok.err != nil {
|
|
||||||
return v, tok.err
|
|
||||||
}
|
|
||||||
if tok.value == terminator {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
var err error
|
|
||||||
switch tok.value {
|
|
||||||
case "key":
|
|
||||||
if err := p.consumeToken(":"); err != nil {
|
|
||||||
return v, err
|
|
||||||
}
|
|
||||||
if kv, err = p.unmarshalSingularValue(kv, keyFD); err != nil {
|
|
||||||
return v, err
|
|
||||||
}
|
|
||||||
if err := p.consumeOptionalSeparator(); err != nil {
|
|
||||||
return v, err
|
|
||||||
}
|
|
||||||
case "value":
|
|
||||||
if err := p.checkForColon(valFD); err != nil {
|
|
||||||
return v, err
|
|
||||||
}
|
|
||||||
if vv, err = p.unmarshalSingularValue(vv, valFD); err != nil {
|
|
||||||
return v, err
|
|
||||||
}
|
|
||||||
if err := p.consumeOptionalSeparator(); err != nil {
|
|
||||||
return v, err
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
p.back()
|
|
||||||
return v, p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
mv.Set(kv.MapKey(), vv)
|
|
||||||
return v, nil
|
|
||||||
default:
|
|
||||||
p.back()
|
|
||||||
return p.unmarshalSingularValue(v, fd)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *textParser) unmarshalSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
|
|
||||||
tok := p.next()
|
|
||||||
if tok.err != nil {
|
|
||||||
return v, tok.err
|
|
||||||
}
|
|
||||||
if tok.value == "" {
|
|
||||||
return v, p.errorf("unexpected EOF")
|
|
||||||
}
|
|
||||||
|
|
||||||
switch fd.Kind() {
|
|
||||||
case protoreflect.BoolKind:
|
|
||||||
switch tok.value {
|
|
||||||
case "true", "1", "t", "True":
|
|
||||||
return protoreflect.ValueOfBool(true), nil
|
|
||||||
case "false", "0", "f", "False":
|
|
||||||
return protoreflect.ValueOfBool(false), nil
|
|
||||||
}
|
|
||||||
case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
|
|
||||||
if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
|
|
||||||
return protoreflect.ValueOfInt32(int32(x)), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// The C++ parser accepts large positive hex numbers that uses
|
|
||||||
// two's complement arithmetic to represent negative numbers.
|
|
||||||
// This feature is here for backwards compatibility with C++.
|
|
||||||
if strings.HasPrefix(tok.value, "0x") {
|
|
||||||
if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
|
|
||||||
return protoreflect.ValueOfInt32(int32(-(int64(^x) + 1))), nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
|
|
||||||
if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
|
|
||||||
return protoreflect.ValueOfInt64(int64(x)), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// The C++ parser accepts large positive hex numbers that uses
|
|
||||||
// two's complement arithmetic to represent negative numbers.
|
|
||||||
// This feature is here for backwards compatibility with C++.
|
|
||||||
if strings.HasPrefix(tok.value, "0x") {
|
|
||||||
if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
|
|
||||||
return protoreflect.ValueOfInt64(int64(-(int64(^x) + 1))), nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
|
|
||||||
if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
|
|
||||||
return protoreflect.ValueOfUint32(uint32(x)), nil
|
|
||||||
}
|
|
||||||
case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
|
|
||||||
if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
|
|
||||||
return protoreflect.ValueOfUint64(uint64(x)), nil
|
|
||||||
}
|
|
||||||
case protoreflect.FloatKind:
|
|
||||||
// Ignore 'f' for compatibility with output generated by C++,
|
|
||||||
// but don't remove 'f' when the value is "-inf" or "inf".
|
|
||||||
v := tok.value
|
|
||||||
if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" {
|
|
||||||
v = v[:len(v)-len("f")]
|
|
||||||
}
|
|
||||||
if x, err := strconv.ParseFloat(v, 32); err == nil {
|
|
||||||
return protoreflect.ValueOfFloat32(float32(x)), nil
|
|
||||||
}
|
|
||||||
case protoreflect.DoubleKind:
|
|
||||||
// Ignore 'f' for compatibility with output generated by C++,
|
|
||||||
// but don't remove 'f' when the value is "-inf" or "inf".
|
|
||||||
v := tok.value
|
|
||||||
if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" {
|
|
||||||
v = v[:len(v)-len("f")]
|
|
||||||
}
|
|
||||||
if x, err := strconv.ParseFloat(v, 64); err == nil {
|
|
||||||
return protoreflect.ValueOfFloat64(float64(x)), nil
|
|
||||||
}
|
|
||||||
case protoreflect.StringKind:
|
|
||||||
if isQuote(tok.value[0]) {
|
|
||||||
return protoreflect.ValueOfString(tok.unquoted), nil
|
|
||||||
}
|
|
||||||
case protoreflect.BytesKind:
|
|
||||||
if isQuote(tok.value[0]) {
|
|
||||||
return protoreflect.ValueOfBytes([]byte(tok.unquoted)), nil
|
|
||||||
}
|
|
||||||
case protoreflect.EnumKind:
|
|
||||||
if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
|
|
||||||
return protoreflect.ValueOfEnum(protoreflect.EnumNumber(x)), nil
|
|
||||||
}
|
|
||||||
vd := fd.Enum().Values().ByName(protoreflect.Name(tok.value))
|
|
||||||
if vd != nil {
|
|
||||||
return protoreflect.ValueOfEnum(vd.Number()), nil
|
|
||||||
}
|
|
||||||
case protoreflect.MessageKind, protoreflect.GroupKind:
|
|
||||||
var terminator string
|
|
||||||
switch tok.value {
|
|
||||||
case "{":
|
|
||||||
terminator = "}"
|
|
||||||
case "<":
|
|
||||||
terminator = ">"
|
|
||||||
default:
|
|
||||||
return v, p.errorf("expected '{' or '<', found %q", tok.value)
|
|
||||||
}
|
|
||||||
err := p.unmarshalMessage(v.Message(), terminator)
|
|
||||||
return v, err
|
|
||||||
default:
|
|
||||||
panic(fmt.Sprintf("invalid kind %v", fd.Kind()))
|
|
||||||
}
|
|
||||||
return v, p.errorf("invalid %v: %v", fd.Kind(), tok.value)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Consume a ':' from the input stream (if the next token is a colon),
|
|
||||||
// returning an error if a colon is needed but not present.
|
|
||||||
func (p *textParser) checkForColon(fd protoreflect.FieldDescriptor) *ParseError {
|
|
||||||
tok := p.next()
|
|
||||||
if tok.err != nil {
|
|
||||||
return tok.err
|
|
||||||
}
|
|
||||||
if tok.value != ":" {
|
|
||||||
if fd.Message() == nil {
|
|
||||||
return p.errorf("expected ':', found %q", tok.value)
|
|
||||||
}
|
|
||||||
p.back()
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// consumeExtensionOrAnyName consumes an extension name or an Any type URL and
|
|
||||||
// the following ']'. It returns the name or URL consumed.
|
|
||||||
func (p *textParser) consumeExtensionOrAnyName() (string, error) {
|
|
||||||
tok := p.next()
|
|
||||||
if tok.err != nil {
|
|
||||||
return "", tok.err
|
|
||||||
}
|
|
||||||
|
|
||||||
// If extension name or type url is quoted, it's a single token.
|
|
||||||
if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
|
|
||||||
name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
return name, p.consumeToken("]")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Consume everything up to "]"
|
|
||||||
var parts []string
|
|
||||||
for tok.value != "]" {
|
|
||||||
parts = append(parts, tok.value)
|
|
||||||
tok = p.next()
|
|
||||||
if tok.err != nil {
|
|
||||||
return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
|
|
||||||
}
|
|
||||||
if p.done && tok.value != "]" {
|
|
||||||
return "", p.errorf("unclosed type_url or extension name")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return strings.Join(parts, ""), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// consumeOptionalSeparator consumes an optional semicolon or comma.
|
|
||||||
// It is used in unmarshalMessage to provide backward compatibility.
|
|
||||||
func (p *textParser) consumeOptionalSeparator() error {
|
|
||||||
tok := p.next()
|
|
||||||
if tok.err != nil {
|
|
||||||
return tok.err
|
|
||||||
}
|
|
||||||
if tok.value != ";" && tok.value != "," {
|
|
||||||
p.back()
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
|
|
||||||
pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
|
|
||||||
p.cur.err = pe
|
|
||||||
p.done = true
|
|
||||||
return pe
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *textParser) skipWhitespace() {
|
|
||||||
i := 0
|
|
||||||
for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
|
|
||||||
if p.s[i] == '#' {
|
|
||||||
// comment; skip to end of line or input
|
|
||||||
for i < len(p.s) && p.s[i] != '\n' {
|
|
||||||
i++
|
|
||||||
}
|
|
||||||
if i == len(p.s) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if p.s[i] == '\n' {
|
|
||||||
p.line++
|
|
||||||
}
|
|
||||||
i++
|
|
||||||
}
|
|
||||||
p.offset += i
|
|
||||||
p.s = p.s[i:len(p.s)]
|
|
||||||
if len(p.s) == 0 {
|
|
||||||
p.done = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *textParser) advance() {
|
|
||||||
// Skip whitespace
|
|
||||||
p.skipWhitespace()
|
|
||||||
if p.done {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Start of non-whitespace
|
|
||||||
p.cur.err = nil
|
|
||||||
p.cur.offset, p.cur.line = p.offset, p.line
|
|
||||||
p.cur.unquoted = ""
|
|
||||||
switch p.s[0] {
|
|
||||||
case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
|
|
||||||
// Single symbol
|
|
||||||
p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
|
|
||||||
case '"', '\'':
|
|
||||||
// Quoted string
|
|
||||||
i := 1
|
|
||||||
for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
|
|
||||||
if p.s[i] == '\\' && i+1 < len(p.s) {
|
|
||||||
// skip escaped char
|
|
||||||
i++
|
|
||||||
}
|
|
||||||
i++
|
|
||||||
}
|
|
||||||
if i >= len(p.s) || p.s[i] != p.s[0] {
|
|
||||||
p.errorf("unmatched quote")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
|
|
||||||
if err != nil {
|
|
||||||
p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
|
|
||||||
p.cur.unquoted = unq
|
|
||||||
default:
|
|
||||||
i := 0
|
|
||||||
for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
|
|
||||||
i++
|
|
||||||
}
|
|
||||||
if i == 0 {
|
|
||||||
p.errorf("unexpected byte %#x", p.s[0])
|
|
||||||
return
|
|
||||||
}
|
|
||||||
p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
|
|
||||||
}
|
|
||||||
p.offset += len(p.cur.value)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Back off the parser by one token. Can only be done between calls to next().
|
|
||||||
// It makes the next advance() a no-op.
|
|
||||||
func (p *textParser) back() { p.backed = true }
|
|
||||||
|
|
||||||
// Advances the parser and returns the new current token.
|
|
||||||
func (p *textParser) next() *token {
|
|
||||||
if p.backed || p.done {
|
|
||||||
p.backed = false
|
|
||||||
return &p.cur
|
|
||||||
}
|
|
||||||
p.advance()
|
|
||||||
if p.done {
|
|
||||||
p.cur.value = ""
|
|
||||||
} else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
|
|
||||||
// Look for multiple quoted strings separated by whitespace,
|
|
||||||
// and concatenate them.
|
|
||||||
cat := p.cur
|
|
||||||
for {
|
|
||||||
p.skipWhitespace()
|
|
||||||
if p.done || !isQuote(p.s[0]) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
p.advance()
|
|
||||||
if p.cur.err != nil {
|
|
||||||
return &p.cur
|
|
||||||
}
|
|
||||||
cat.value += " " + p.cur.value
|
|
||||||
cat.unquoted += p.cur.unquoted
|
|
||||||
}
|
|
||||||
p.done = false // parser may have seen EOF, but we want to return cat
|
|
||||||
p.cur = cat
|
|
||||||
}
|
|
||||||
return &p.cur
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *textParser) consumeToken(s string) error {
|
|
||||||
tok := p.next()
|
|
||||||
if tok.err != nil {
|
|
||||||
return tok.err
|
|
||||||
}
|
|
||||||
if tok.value != s {
|
|
||||||
p.back()
|
|
||||||
return p.errorf("expected %q, found %q", s, tok.value)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
var errBadUTF8 = errors.New("proto: bad UTF-8")
|
|
||||||
|
|
||||||
func unquoteC(s string, quote rune) (string, error) {
|
|
||||||
// This is based on C++'s tokenizer.cc.
|
|
||||||
// Despite its name, this is *not* parsing C syntax.
|
|
||||||
// For instance, "\0" is an invalid quoted string.
|
|
||||||
|
|
||||||
// Avoid allocation in trivial cases.
|
|
||||||
simple := true
|
|
||||||
for _, r := range s {
|
|
||||||
if r == '\\' || r == quote {
|
|
||||||
simple = false
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if simple {
|
|
||||||
return s, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
buf := make([]byte, 0, 3*len(s)/2)
|
|
||||||
for len(s) > 0 {
|
|
||||||
r, n := utf8.DecodeRuneInString(s)
|
|
||||||
if r == utf8.RuneError && n == 1 {
|
|
||||||
return "", errBadUTF8
|
|
||||||
}
|
|
||||||
s = s[n:]
|
|
||||||
if r != '\\' {
|
|
||||||
if r < utf8.RuneSelf {
|
|
||||||
buf = append(buf, byte(r))
|
|
||||||
} else {
|
|
||||||
buf = append(buf, string(r)...)
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
ch, tail, err := unescape(s)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
buf = append(buf, ch...)
|
|
||||||
s = tail
|
|
||||||
}
|
|
||||||
return string(buf), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func unescape(s string) (ch string, tail string, err error) {
|
|
||||||
r, n := utf8.DecodeRuneInString(s)
|
|
||||||
if r == utf8.RuneError && n == 1 {
|
|
||||||
return "", "", errBadUTF8
|
|
||||||
}
|
|
||||||
s = s[n:]
|
|
||||||
switch r {
|
|
||||||
case 'a':
|
|
||||||
return "\a", s, nil
|
|
||||||
case 'b':
|
|
||||||
return "\b", s, nil
|
|
||||||
case 'f':
|
|
||||||
return "\f", s, nil
|
|
||||||
case 'n':
|
|
||||||
return "\n", s, nil
|
|
||||||
case 'r':
|
|
||||||
return "\r", s, nil
|
|
||||||
case 't':
|
|
||||||
return "\t", s, nil
|
|
||||||
case 'v':
|
|
||||||
return "\v", s, nil
|
|
||||||
case '?':
|
|
||||||
return "?", s, nil // trigraph workaround
|
|
||||||
case '\'', '"', '\\':
|
|
||||||
return string(r), s, nil
|
|
||||||
case '0', '1', '2', '3', '4', '5', '6', '7':
|
|
||||||
if len(s) < 2 {
|
|
||||||
return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
|
|
||||||
}
|
|
||||||
ss := string(r) + s[:2]
|
|
||||||
s = s[2:]
|
|
||||||
i, err := strconv.ParseUint(ss, 8, 8)
|
|
||||||
if err != nil {
|
|
||||||
return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss)
|
|
||||||
}
|
|
||||||
return string([]byte{byte(i)}), s, nil
|
|
||||||
case 'x', 'X', 'u', 'U':
|
|
||||||
var n int
|
|
||||||
switch r {
|
|
||||||
case 'x', 'X':
|
|
||||||
n = 2
|
|
||||||
case 'u':
|
|
||||||
n = 4
|
|
||||||
case 'U':
|
|
||||||
n = 8
|
|
||||||
}
|
|
||||||
if len(s) < n {
|
|
||||||
return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n)
|
|
||||||
}
|
|
||||||
ss := s[:n]
|
|
||||||
s = s[n:]
|
|
||||||
i, err := strconv.ParseUint(ss, 16, 64)
|
|
||||||
if err != nil {
|
|
||||||
return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss)
|
|
||||||
}
|
|
||||||
if r == 'x' || r == 'X' {
|
|
||||||
return string([]byte{byte(i)}), s, nil
|
|
||||||
}
|
|
||||||
if i > utf8.MaxRune {
|
|
||||||
return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss)
|
|
||||||
}
|
|
||||||
return string(rune(i)), s, nil
|
|
||||||
}
|
|
||||||
return "", "", fmt.Errorf(`unknown escape \%c`, r)
|
|
||||||
}
|
|
||||||
|
|
||||||
func isIdentOrNumberChar(c byte) bool {
|
|
||||||
switch {
|
|
||||||
case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
|
|
||||||
return true
|
|
||||||
case '0' <= c && c <= '9':
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
switch c {
|
|
||||||
case '-', '+', '.', '_':
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func isWhitespace(c byte) bool {
|
|
||||||
switch c {
|
|
||||||
case ' ', '\t', '\n', '\r':
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func isQuote(c byte) bool {
|
|
||||||
switch c {
|
|
||||||
case '"', '\'':
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
560
vendor/github.com/golang/protobuf/proto/text_encode.go
generated
vendored
560
vendor/github.com/golang/protobuf/proto/text_encode.go
generated
vendored
@ -1,560 +0,0 @@
|
|||||||
// Copyright 2010 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package proto
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"encoding"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"math"
|
|
||||||
"sort"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"google.golang.org/protobuf/encoding/prototext"
|
|
||||||
"google.golang.org/protobuf/encoding/protowire"
|
|
||||||
"google.golang.org/protobuf/proto"
|
|
||||||
"google.golang.org/protobuf/reflect/protoreflect"
|
|
||||||
"google.golang.org/protobuf/reflect/protoregistry"
|
|
||||||
)
|
|
||||||
|
|
||||||
const wrapTextMarshalV2 = false
|
|
||||||
|
|
||||||
// TextMarshaler is a configurable text format marshaler.
|
|
||||||
type TextMarshaler struct {
|
|
||||||
Compact bool // use compact text format (one line)
|
|
||||||
ExpandAny bool // expand google.protobuf.Any messages of known types
|
|
||||||
}
|
|
||||||
|
|
||||||
// Marshal writes the proto text format of m to w.
|
|
||||||
func (tm *TextMarshaler) Marshal(w io.Writer, m Message) error {
|
|
||||||
b, err := tm.marshal(m)
|
|
||||||
if len(b) > 0 {
|
|
||||||
if _, err := w.Write(b); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Text returns a proto text formatted string of m.
|
|
||||||
func (tm *TextMarshaler) Text(m Message) string {
|
|
||||||
b, _ := tm.marshal(m)
|
|
||||||
return string(b)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (tm *TextMarshaler) marshal(m Message) ([]byte, error) {
|
|
||||||
mr := MessageReflect(m)
|
|
||||||
if mr == nil || !mr.IsValid() {
|
|
||||||
return []byte("<nil>"), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if wrapTextMarshalV2 {
|
|
||||||
if m, ok := m.(encoding.TextMarshaler); ok {
|
|
||||||
return m.MarshalText()
|
|
||||||
}
|
|
||||||
|
|
||||||
opts := prototext.MarshalOptions{
|
|
||||||
AllowPartial: true,
|
|
||||||
EmitUnknown: true,
|
|
||||||
}
|
|
||||||
if !tm.Compact {
|
|
||||||
opts.Indent = " "
|
|
||||||
}
|
|
||||||
if !tm.ExpandAny {
|
|
||||||
opts.Resolver = (*protoregistry.Types)(nil)
|
|
||||||
}
|
|
||||||
return opts.Marshal(mr.Interface())
|
|
||||||
} else {
|
|
||||||
w := &textWriter{
|
|
||||||
compact: tm.Compact,
|
|
||||||
expandAny: tm.ExpandAny,
|
|
||||||
complete: true,
|
|
||||||
}
|
|
||||||
|
|
||||||
if m, ok := m.(encoding.TextMarshaler); ok {
|
|
||||||
b, err := m.MarshalText()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
w.Write(b)
|
|
||||||
return w.buf, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
err := w.writeMessage(mr)
|
|
||||||
return w.buf, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
	defaultTextMarshaler = TextMarshaler{}              // multi-line text format
	compactTextMarshaler = TextMarshaler{Compact: true} // single-line text format
)
|
|
||||||
|
|
||||||
// MarshalText writes the proto text format of m to w.
|
|
||||||
func MarshalText(w io.Writer, m Message) error { return defaultTextMarshaler.Marshal(w, m) }
|
|
||||||
|
|
||||||
// MarshalTextString returns a proto text formatted string of m.
|
|
||||||
func MarshalTextString(m Message) string { return defaultTextMarshaler.Text(m) }
|
|
||||||
|
|
||||||
// CompactText writes the compact proto text format of m to w.
|
|
||||||
func CompactText(w io.Writer, m Message) error { return compactTextMarshaler.Marshal(w, m) }
|
|
||||||
|
|
||||||
// CompactTextString returns a compact proto text formatted string of m.
|
|
||||||
func CompactTextString(m Message) string { return compactTextMarshaler.Text(m) }
|
|
||||||
|
|
||||||
// Shared byte literals used by the text writer.
var (
	newline         = []byte("\n")
	endBraceNewline = []byte("}\n")
	posInf          = []byte("inf")
	negInf          = []byte("-inf")
	nan             = []byte("nan")
)
|
|
||||||
|
|
||||||
// textWriter is an io.Writer that tracks its indentation level.
type textWriter struct {
	compact   bool   // same as TextMarshaler.Compact
	expandAny bool   // same as TextMarshaler.ExpandAny
	complete  bool   // whether the current position is a complete line
	indent    int    // indentation level; never negative
	buf       []byte // accumulated output
}
|
|
||||||
|
|
||||||
// Write implements io.Writer. Newlines in p split the input into line
// fragments; in compact mode fragments are joined with single spaces,
// otherwise each fresh line is prefixed with the current indentation.
// The returned error is always nil.
func (w *textWriter) Write(p []byte) (n int, _ error) {
	newlines := bytes.Count(p, newline)
	if newlines == 0 {
		// Fast path: no newline, append to the current line.
		if !w.compact && w.complete {
			w.writeIndent()
		}
		w.buf = append(w.buf, p...)
		w.complete = false
		return len(p), nil
	}

	frags := bytes.SplitN(p, newline, newlines+1)
	if w.compact {
		// Compact mode: each newline becomes a single space.
		for i, frag := range frags {
			if i > 0 {
				w.buf = append(w.buf, ' ')
				n++
			}
			w.buf = append(w.buf, frag...)
			n += len(frag)
		}
		return n, nil
	}

	for i, frag := range frags {
		if w.complete {
			w.writeIndent()
		}
		w.buf = append(w.buf, frag...)
		n += len(frag)
		if i+1 < len(frags) {
			w.buf = append(w.buf, '\n')
			n++
		}
	}
	// The line is complete iff p ended with a newline (empty last fragment).
	w.complete = len(frags[len(frags)-1]) == 0
	return n, nil
}
|
|
||||||
|
|
||||||
func (w *textWriter) WriteByte(c byte) error {
|
|
||||||
if w.compact && c == '\n' {
|
|
||||||
c = ' '
|
|
||||||
}
|
|
||||||
if !w.compact && w.complete {
|
|
||||||
w.writeIndent()
|
|
||||||
}
|
|
||||||
w.buf = append(w.buf, c)
|
|
||||||
w.complete = c == '\n'
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// writeName writes the field name followed by ':' for non-group fields,
// or the message type name alone for groups, plus a trailing space in
// multi-line mode.
func (w *textWriter) writeName(fd protoreflect.FieldDescriptor) {
	if !w.compact && w.complete {
		w.writeIndent()
	}
	w.complete = false

	if fd.Kind() != protoreflect.GroupKind {
		w.buf = append(w.buf, fd.Name()...)
		w.WriteByte(':')
	} else {
		// Use message type name for group field name.
		w.buf = append(w.buf, fd.Message().Name()...)
	}

	if !w.compact {
		w.WriteByte(' ')
	}
}
|
|
||||||
|
|
||||||
// requiresQuotes reports whether the type URL u must be quoted in text
// output. Only ASCII letters, digits, and the characters '.', '/', and
// '_' may appear unquoted; anything else (including '-') forces quoting.
func requiresQuotes(u string) bool {
	for _, r := range u {
		switch {
		case r == '.' || r == '/' || r == '_',
			'0' <= r && r <= '9',
			'A' <= r && r <= 'Z',
			'a' <= r && r <= 'z':
			// allowed; keep scanning
		default:
			return true
		}
	}
	return false
}
|
|
||||||
|
|
||||||
// writeProto3Any writes an expanded google.protobuf.Any message.
//
// It returns (false, nil) if sv value can't be unmarshaled (e.g. because
// required messages are not linked in).
//
// It returns (true, error) when sv was written in expanded format or an
// error was encountered.
func (w *textWriter) writeProto3Any(m protoreflect.Message) (bool, error) {
	md := m.Descriptor()
	fdURL := md.Fields().ByName("type_url")
	fdVal := md.Fields().ByName("value")

	url := m.Get(fdURL).String()
	mt, err := protoregistry.GlobalTypes.FindMessageByURL(url)
	if err != nil {
		// Unknown type: caller falls back to unexpanded output.
		return false, nil
	}

	b := m.Get(fdVal).Bytes()
	m2 := mt.New()
	if err := proto.Unmarshal(b, m2.Interface()); err != nil {
		return false, nil
	}
	// Emit "[type_url]: < payload >", quoting the URL only when needed.
	w.Write([]byte("["))
	if requiresQuotes(url) {
		w.writeQuotedString(url)
	} else {
		w.Write([]byte(url))
	}
	if w.compact {
		w.Write([]byte("]:<"))
	} else {
		w.Write([]byte("]: <\n"))
		w.indent++
	}
	if err := w.writeMessage(m2); err != nil {
		return true, err
	}
	if w.compact {
		w.Write([]byte("> "))
	} else {
		w.indent--
		w.Write([]byte(">\n"))
	}
	return true, nil
}
|
|
||||||
|
|
||||||
// writeMessage writes the fields of m in text format: expanded Any
// messages (when enabled), regular fields in descriptor order with
// oneofs collapsed to the set member, then unknown fields and extensions.
func (w *textWriter) writeMessage(m protoreflect.Message) error {
	md := m.Descriptor()
	if w.expandAny && md.FullName() == "google.protobuf.Any" {
		if canExpand, err := w.writeProto3Any(m); canExpand {
			return err
		}
		// Expansion not possible: fall through and print it unexpanded.
	}

	fds := md.Fields()
	for i := 0; i < fds.Len(); {
		fd := fds.Get(i)
		if od := fd.ContainingOneof(); od != nil {
			// Print only the member of the oneof that is set, then
			// skip past the whole oneof group.
			fd = m.WhichOneof(od)
			i += od.Fields().Len()
		} else {
			i++
		}
		if fd == nil || !m.Has(fd) {
			continue
		}

		switch {
		case fd.IsList():
			lv := m.Get(fd).List()
			for j := 0; j < lv.Len(); j++ {
				w.writeName(fd)
				v := lv.Get(j)
				if err := w.writeSingularValue(v, fd); err != nil {
					return err
				}
				w.WriteByte('\n')
			}
		case fd.IsMap():
			kfd := fd.MapKey()
			vfd := fd.MapValue()
			mv := m.Get(fd).Map()

			// Collect and sort entries by key for deterministic
			// output; map iteration order is random.
			type entry struct{ key, val protoreflect.Value }
			var entries []entry
			mv.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool {
				entries = append(entries, entry{k.Value(), v})
				return true
			})
			sort.Slice(entries, func(i, j int) bool {
				switch kfd.Kind() {
				case protoreflect.BoolKind:
					return !entries[i].key.Bool() && entries[j].key.Bool()
				case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
					return entries[i].key.Int() < entries[j].key.Int()
				case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
					return entries[i].key.Uint() < entries[j].key.Uint()
				case protoreflect.StringKind:
					return entries[i].key.String() < entries[j].key.String()
				default:
					panic("invalid kind") // map keys are restricted to the kinds above
				}
			})
			for _, entry := range entries {
				// Each map entry prints as a nested message
				// holding its key and value fields.
				w.writeName(fd)
				w.WriteByte('<')
				if !w.compact {
					w.WriteByte('\n')
				}
				w.indent++
				w.writeName(kfd)
				if err := w.writeSingularValue(entry.key, kfd); err != nil {
					return err
				}
				w.WriteByte('\n')
				w.writeName(vfd)
				if err := w.writeSingularValue(entry.val, vfd); err != nil {
					return err
				}
				w.WriteByte('\n')
				w.indent--
				w.WriteByte('>')
				w.WriteByte('\n')
			}
		default:
			w.writeName(fd)
			if err := w.writeSingularValue(m.Get(fd), fd); err != nil {
				return err
			}
			w.WriteByte('\n')
		}
	}

	if b := m.GetUnknown(); len(b) > 0 {
		w.writeUnknownFields(b)
	}
	return w.writeExtensions(m)
}
|
|
||||||
|
|
||||||
// writeSingularValue writes a single scalar, string, bytes, enum, or
// message value according to the kind of fd.
func (w *textWriter) writeSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) error {
	switch fd.Kind() {
	case protoreflect.FloatKind, protoreflect.DoubleKind:
		// Special float values use the text-format spellings inf/-inf/nan.
		switch vf := v.Float(); {
		case math.IsInf(vf, +1):
			w.Write(posInf)
		case math.IsInf(vf, -1):
			w.Write(negInf)
		case math.IsNaN(vf):
			w.Write(nan)
		default:
			fmt.Fprint(w, v.Interface())
		}
	case protoreflect.StringKind:
		// NOTE: This does not validate UTF-8 for historical reasons.
		w.writeQuotedString(string(v.String()))
	case protoreflect.BytesKind:
		w.writeQuotedString(string(v.Bytes()))
	case protoreflect.MessageKind, protoreflect.GroupKind:
		// Messages print inside <...>; groups inside {...}.
		var bra, ket byte = '<', '>'
		if fd.Kind() == protoreflect.GroupKind {
			bra, ket = '{', '}'
		}
		w.WriteByte(bra)
		if !w.compact {
			w.WriteByte('\n')
		}
		w.indent++
		m := v.Message()
		if m2, ok := m.Interface().(encoding.TextMarshaler); ok {
			// Let the message marshal itself when it knows how.
			b, err := m2.MarshalText()
			if err != nil {
				return err
			}
			w.Write(b)
		} else {
			w.writeMessage(m)
		}
		w.indent--
		w.WriteByte(ket)
	case protoreflect.EnumKind:
		// Prefer the enum value name; fall back to the raw number.
		if ev := fd.Enum().Values().ByNumber(v.Enum()); ev != nil {
			fmt.Fprint(w, ev.Name())
		} else {
			fmt.Fprint(w, v.Enum())
		}
	default:
		fmt.Fprint(w, v.Interface())
	}
	return nil
}
|
|
||||||
|
|
||||||
// writeQuotedString writes a quoted string in the protocol buffer text format.
|
|
||||||
func (w *textWriter) writeQuotedString(s string) {
|
|
||||||
w.WriteByte('"')
|
|
||||||
for i := 0; i < len(s); i++ {
|
|
||||||
switch c := s[i]; c {
|
|
||||||
case '\n':
|
|
||||||
w.buf = append(w.buf, `\n`...)
|
|
||||||
case '\r':
|
|
||||||
w.buf = append(w.buf, `\r`...)
|
|
||||||
case '\t':
|
|
||||||
w.buf = append(w.buf, `\t`...)
|
|
||||||
case '"':
|
|
||||||
w.buf = append(w.buf, `\"`...)
|
|
||||||
case '\\':
|
|
||||||
w.buf = append(w.buf, `\\`...)
|
|
||||||
default:
|
|
||||||
if isPrint := c >= 0x20 && c < 0x7f; isPrint {
|
|
||||||
w.buf = append(w.buf, c)
|
|
||||||
} else {
|
|
||||||
w.buf = append(w.buf, fmt.Sprintf(`\%03o`, c)...)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
w.WriteByte('"')
|
|
||||||
}
|
|
||||||
|
|
||||||
// writeUnknownFields best-effort decodes the raw unknown-field bytes b
// and prints them as tag/value pairs. Malformed input is abandoned
// silently: the function simply returns at the first bad token.
func (w *textWriter) writeUnknownFields(b []byte) {
	if !w.compact {
		fmt.Fprintf(w, "/* %d unknown bytes */\n", len(b))
	}

	for len(b) > 0 {
		num, wtyp, n := protowire.ConsumeTag(b)
		if n < 0 {
			return // truncated or invalid tag
		}
		b = b[n:]

		if wtyp == protowire.EndGroupType {
			// Close the group opened by a StartGroupType below.
			w.indent--
			w.Write(endBraceNewline)
			continue
		}
		fmt.Fprint(w, num)
		if wtyp != protowire.StartGroupType {
			w.WriteByte(':')
		}
		if !w.compact || wtyp == protowire.StartGroupType {
			w.WriteByte(' ')
		}
		switch wtyp {
		case protowire.VarintType:
			v, n := protowire.ConsumeVarint(b)
			if n < 0 {
				return
			}
			b = b[n:]
			fmt.Fprint(w, v)
		case protowire.Fixed32Type:
			v, n := protowire.ConsumeFixed32(b)
			if n < 0 {
				return
			}
			b = b[n:]
			fmt.Fprint(w, v)
		case protowire.Fixed64Type:
			v, n := protowire.ConsumeFixed64(b)
			if n < 0 {
				return
			}
			b = b[n:]
			fmt.Fprint(w, v)
		case protowire.BytesType:
			v, n := protowire.ConsumeBytes(b)
			if n < 0 {
				return
			}
			b = b[n:]
			fmt.Fprintf(w, "%q", v)
		case protowire.StartGroupType:
			w.WriteByte('{')
			w.indent++
		default:
			fmt.Fprintf(w, "/* unknown wire type %d */", wtyp)
		}
		w.WriteByte('\n')
	}
}
|
|
||||||
|
|
||||||
// writeExtensions writes all the extensions in m, sorted by field number.
func (w *textWriter) writeExtensions(m protoreflect.Message) error {
	md := m.Descriptor()
	if md.ExtensionRanges().Len() == 0 {
		return nil
	}

	type ext struct {
		desc protoreflect.FieldDescriptor
		val  protoreflect.Value
	}
	var exts []ext
	m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
		if fd.IsExtension() {
			exts = append(exts, ext{fd, v})
		}
		return true
	})
	// Sort by field number for deterministic output.
	sort.Slice(exts, func(i, j int) bool {
		return exts[i].desc.Number() < exts[j].desc.Number()
	})

	for _, ext := range exts {
		// For message set, use the name of the message as the extension name.
		name := string(ext.desc.FullName())
		if isMessageSet(ext.desc.ContainingMessage()) {
			name = strings.TrimSuffix(name, ".message_set_extension")
		}

		if !ext.desc.IsList() {
			if err := w.writeSingularExtension(name, ext.val, ext.desc); err != nil {
				return err
			}
		} else {
			// Repeated extensions print one entry per element.
			lv := ext.val.List()
			for i := 0; i < lv.Len(); i++ {
				if err := w.writeSingularExtension(name, lv.Get(i), ext.desc); err != nil {
					return err
				}
			}
		}
	}
	return nil
}
|
|
||||||
|
|
||||||
func (w *textWriter) writeSingularExtension(name string, v protoreflect.Value, fd protoreflect.FieldDescriptor) error {
|
|
||||||
fmt.Fprintf(w, "[%s]:", name)
|
|
||||||
if !w.compact {
|
|
||||||
w.WriteByte(' ')
|
|
||||||
}
|
|
||||||
if err := w.writeSingularValue(v, fd); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
w.WriteByte('\n')
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *textWriter) writeIndent() {
|
|
||||||
if !w.complete {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
for i := 0; i < w.indent*2; i++ {
|
|
||||||
w.buf = append(w.buf, ' ')
|
|
||||||
}
|
|
||||||
w.complete = false
|
|
||||||
}
|
|
78
vendor/github.com/golang/protobuf/proto/wire.go
generated
vendored
78
vendor/github.com/golang/protobuf/proto/wire.go
generated
vendored
@ -1,78 +0,0 @@
|
|||||||
// Copyright 2019 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package proto
|
|
||||||
|
|
||||||
import (
|
|
||||||
protoV2 "google.golang.org/protobuf/proto"
|
|
||||||
"google.golang.org/protobuf/runtime/protoiface"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Size returns the size in bytes of the wire-format encoding of m.
|
|
||||||
func Size(m Message) int {
|
|
||||||
if m == nil {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
mi := MessageV2(m)
|
|
||||||
return protoV2.Size(mi)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Marshal returns the wire-format encoding of m.
|
|
||||||
func Marshal(m Message) ([]byte, error) {
|
|
||||||
b, err := marshalAppend(nil, m, false)
|
|
||||||
if b == nil {
|
|
||||||
b = zeroBytes
|
|
||||||
}
|
|
||||||
return b, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// zeroBytes is a reusable non-nil empty slice, returned by Marshal so a
// successful encode of an empty message never yields a nil slice.
var zeroBytes = make([]byte, 0, 0)
|
|
||||||
|
|
||||||
// marshalAppend appends the wire-format encoding of m to buf.
//
// A nil message reports ErrNil immediately. If encoding appended no
// bytes and the underlying message is invalid (e.g. a typed-nil
// pointer), ErrNil is reported as well. Required-field checks run after
// a successful encode.
func marshalAppend(buf []byte, m Message, deterministic bool) ([]byte, error) {
	if m == nil {
		return nil, ErrNil
	}
	mi := MessageV2(m)
	nbuf, err := protoV2.MarshalOptions{
		Deterministic: deterministic,
		AllowPartial:  true,
	}.MarshalAppend(buf, mi)
	if err != nil {
		return buf, err
	}
	if len(buf) == len(nbuf) {
		// Nothing was appended: distinguish an empty message from an
		// invalid one.
		if !mi.ProtoReflect().IsValid() {
			return buf, ErrNil
		}
	}
	return nbuf, checkRequiredNotSet(mi)
}
|
|
||||||
|
|
||||||
// Unmarshal parses a wire-format message in b and places the decoded results in m.
//
// Unmarshal resets m before starting to unmarshal, so any existing data in m is always
// removed. Use UnmarshalMerge to preserve and append to existing data.
func Unmarshal(b []byte, m Message) error {
	m.Reset() // discard any prior state before merging
	return UnmarshalMerge(b, m)
}
|
|
||||||
|
|
||||||
// UnmarshalMerge parses a wire-format message in b and places the decoded results in m.
// Unlike Unmarshal, existing data in m is preserved and merged with the input.
func UnmarshalMerge(b []byte, m Message) error {
	mi := MessageV2(m)
	out, err := protoV2.UnmarshalOptions{
		AllowPartial: true,
		Merge:        true,
	}.UnmarshalState(protoiface.UnmarshalInput{
		Buf:     b,
		Message: mi.ProtoReflect(),
	})
	if err != nil {
		return err
	}
	if out.Flags&protoiface.UnmarshalInitialized > 0 {
		// The decoder already verified that required fields are set.
		return nil
	}
	return checkRequiredNotSet(mi)
}
|
|
34
vendor/github.com/golang/protobuf/proto/wrappers.go
generated
vendored
34
vendor/github.com/golang/protobuf/proto/wrappers.go
generated
vendored
@ -1,34 +0,0 @@
|
|||||||
// Copyright 2019 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package proto
|
|
||||||
|
|
||||||
// Bool stores v in a new bool value and returns a pointer to it.
func Bool(v bool) *bool {
	p := new(bool)
	*p = v
	return p
}

// Int stores v in a new int32 value and returns a pointer to it.
//
// Deprecated: Use Int32 instead.
func Int(v int) *int32 {
	return Int32(int32(v))
}

// Int32 stores v in a new int32 value and returns a pointer to it.
func Int32(v int32) *int32 {
	p := new(int32)
	*p = v
	return p
}

// Int64 stores v in a new int64 value and returns a pointer to it.
func Int64(v int64) *int64 {
	p := new(int64)
	*p = v
	return p
}

// Uint32 stores v in a new uint32 value and returns a pointer to it.
func Uint32(v uint32) *uint32 {
	p := new(uint32)
	*p = v
	return p
}

// Uint64 stores v in a new uint64 value and returns a pointer to it.
func Uint64(v uint64) *uint64 {
	p := new(uint64)
	*p = v
	return p
}

// Float32 stores v in a new float32 value and returns a pointer to it.
func Float32(v float32) *float32 {
	p := new(float32)
	*p = v
	return p
}

// Float64 stores v in a new float64 value and returns a pointer to it.
func Float64(v float64) *float64 {
	p := new(float64)
	*p = v
	return p
}

// String stores v in a new string value and returns a pointer to it.
func String(v string) *string {
	p := new(string)
	*p = v
	return p
}
|
|
64
vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
generated
vendored
64
vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
generated
vendored
@ -1,64 +0,0 @@
|
|||||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
|
||||||
// source: github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
|
|
||||||
|
|
||||||
package timestamp
|
|
||||||
|
|
||||||
import (
|
|
||||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
|
||||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
|
||||||
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
|
|
||||||
reflect "reflect"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Symbols defined in public import of google/protobuf/timestamp.proto.
|
|
||||||
|
|
||||||
type Timestamp = timestamppb.Timestamp
|
|
||||||
|
|
||||||
var File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto protoreflect.FileDescriptor
|
|
||||||
|
|
||||||
var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = []byte{
|
|
||||||
0x0a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
|
|
||||||
0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
|
|
||||||
0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2f, 0x74, 0x69,
|
|
||||||
0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67,
|
|
||||||
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74,
|
|
||||||
0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x37,
|
|
||||||
0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
|
|
||||||
0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
|
|
||||||
0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x3b, 0x74, 0x69,
|
|
||||||
0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
|
|
||||||
0x33,
|
|
||||||
}
|
|
||||||
|
|
||||||
var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = []interface{}{}
|
|
||||||
var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = []int32{
|
|
||||||
0, // [0:0] is the sub-list for method output_type
|
|
||||||
0, // [0:0] is the sub-list for method input_type
|
|
||||||
0, // [0:0] is the sub-list for extension type_name
|
|
||||||
0, // [0:0] is the sub-list for extension extendee
|
|
||||||
0, // [0:0] is the sub-list for field type_name
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() { file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() }
|
|
||||||
func file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() {
|
|
||||||
if File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
type x struct{}
|
|
||||||
out := protoimpl.TypeBuilder{
|
|
||||||
File: protoimpl.DescBuilder{
|
|
||||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
|
||||||
RawDescriptor: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc,
|
|
||||||
NumEnums: 0,
|
|
||||||
NumMessages: 0,
|
|
||||||
NumExtensions: 0,
|
|
||||||
NumServices: 0,
|
|
||||||
},
|
|
||||||
GoTypes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes,
|
|
||||||
DependencyIndexes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs,
|
|
||||||
}.Build()
|
|
||||||
File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto = out.File
|
|
||||||
file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = nil
|
|
||||||
file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = nil
|
|
||||||
file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = nil
|
|
||||||
}
|
|
202
vendor/github.com/google/shlex/COPYING
generated
vendored
202
vendor/github.com/google/shlex/COPYING
generated
vendored
@ -1,202 +0,0 @@
|
|||||||
|
|
||||||
Apache License
|
|
||||||
Version 2.0, January 2004
|
|
||||||
http://www.apache.org/licenses/
|
|
||||||
|
|
||||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
|
||||||
|
|
||||||
1. Definitions.
|
|
||||||
|
|
||||||
"License" shall mean the terms and conditions for use, reproduction,
|
|
||||||
and distribution as defined by Sections 1 through 9 of this document.
|
|
||||||
|
|
||||||
"Licensor" shall mean the copyright owner or entity authorized by
|
|
||||||
the copyright owner that is granting the License.
|
|
||||||
|
|
||||||
"Legal Entity" shall mean the union of the acting entity and all
|
|
||||||
other entities that control, are controlled by, or are under common
|
|
||||||
control with that entity. For the purposes of this definition,
|
|
||||||
"control" means (i) the power, direct or indirect, to cause the
|
|
||||||
direction or management of such entity, whether by contract or
|
|
||||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
|
||||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
|
||||||
|
|
||||||
"You" (or "Your") shall mean an individual or Legal Entity
|
|
||||||
exercising permissions granted by this License.
|
|
||||||
|
|
||||||
"Source" form shall mean the preferred form for making modifications,
|
|
||||||
including but not limited to software source code, documentation
|
|
||||||
source, and configuration files.
|
|
||||||
|
|
||||||
"Object" form shall mean any form resulting from mechanical
|
|
||||||
transformation or translation of a Source form, including but
|
|
||||||
not limited to compiled object code, generated documentation,
|
|
||||||
and conversions to other media types.
|
|
||||||
|
|
||||||
"Work" shall mean the work of authorship, whether in Source or
|
|
||||||
Object form, made available under the License, as indicated by a
|
|
||||||
copyright notice that is included in or attached to the work
|
|
||||||
(an example is provided in the Appendix below).
|
|
||||||
|
|
||||||
"Derivative Works" shall mean any work, whether in Source or Object
|
|
||||||
form, that is based on (or derived from) the Work and for which the
|
|
||||||
editorial revisions, annotations, elaborations, or other modifications
|
|
||||||
represent, as a whole, an original work of authorship. For the purposes
|
|
||||||
of this License, Derivative Works shall not include works that remain
|
|
||||||
separable from, or merely link (or bind by name) to the interfaces of,
|
|
||||||
the Work and Derivative Works thereof.
|
|
||||||
|
|
||||||
"Contribution" shall mean any work of authorship, including
|
|
||||||
the original version of the Work and any modifications or additions
|
|
||||||
to that Work or Derivative Works thereof, that is intentionally
|
|
||||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
|
||||||
or by an individual or Legal Entity authorized to submit on behalf of
|
|
||||||
the copyright owner. For the purposes of this definition, "submitted"
|
|
||||||
means any form of electronic, verbal, or written communication sent
|
|
||||||
to the Licensor or its representatives, including but not limited to
|
|
||||||
communication on electronic mailing lists, source code control systems,
|
|
||||||
and issue tracking systems that are managed by, or on behalf of, the
|
|
||||||
Licensor for the purpose of discussing and improving the Work, but
|
|
||||||
excluding communication that is conspicuously marked or otherwise
|
|
||||||
designated in writing by the copyright owner as "Not a Contribution."
|
|
||||||
|
|
||||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
|
||||||
on behalf of whom a Contribution has been received by Licensor and
|
|
||||||
subsequently incorporated within the Work.
|
|
||||||
|
|
||||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
|
||||||
this License, each Contributor hereby grants to You a perpetual,
|
|
||||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
|
||||||
copyright license to reproduce, prepare Derivative Works of,
|
|
||||||
publicly display, publicly perform, sublicense, and distribute the
|
|
||||||
Work and such Derivative Works in Source or Object form.
|
|
||||||
|
|
||||||
3. Grant of Patent License. Subject to the terms and conditions of
|
|
||||||
this License, each Contributor hereby grants to You a perpetual,
|
|
||||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
|
||||||
(except as stated in this section) patent license to make, have made,
|
|
||||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
|
||||||
where such license applies only to those patent claims licensable
|
|
||||||
by such Contributor that are necessarily infringed by their
|
|
||||||
Contribution(s) alone or by combination of their Contribution(s)
|
|
||||||
with the Work to which such Contribution(s) was submitted. If You
|
|
||||||
institute patent litigation against any entity (including a
|
|
||||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
|
||||||
or a Contribution incorporated within the Work constitutes direct
|
|
||||||
or contributory patent infringement, then any patent licenses
|
|
||||||
granted to You under this License for that Work shall terminate
|
|
||||||
as of the date such litigation is filed.
|
|
||||||
|
|
||||||
4. Redistribution. You may reproduce and distribute copies of the
|
|
||||||
Work or Derivative Works thereof in any medium, with or without
|
|
||||||
modifications, and in Source or Object form, provided that You
|
|
||||||
meet the following conditions:
|
|
||||||
|
|
||||||
(a) You must give any other recipients of the Work or
|
|
||||||
Derivative Works a copy of this License; and
|
|
||||||
|
|
||||||
(b) You must cause any modified files to carry prominent notices
|
|
||||||
stating that You changed the files; and
|
|
||||||
|
|
||||||
(c) You must retain, in the Source form of any Derivative Works
|
|
||||||
that You distribute, all copyright, patent, trademark, and
|
|
||||||
attribution notices from the Source form of the Work,
|
|
||||||
excluding those notices that do not pertain to any part of
|
|
||||||
the Derivative Works; and
|
|
||||||
|
|
||||||
(d) If the Work includes a "NOTICE" text file as part of its
|
|
||||||
distribution, then any Derivative Works that You distribute must
|
|
||||||
include a readable copy of the attribution notices contained
|
|
||||||
within such NOTICE file, excluding those notices that do not
|
|
||||||
pertain to any part of the Derivative Works, in at least one
|
|
||||||
of the following places: within a NOTICE text file distributed
|
|
||||||
as part of the Derivative Works; within the Source form or
|
|
||||||
documentation, if provided along with the Derivative Works; or,
|
|
||||||
within a display generated by the Derivative Works, if and
|
|
||||||
wherever such third-party notices normally appear. The contents
|
|
||||||
of the NOTICE file are for informational purposes only and
|
|
||||||
do not modify the License. You may add Your own attribution
|
|
||||||
notices within Derivative Works that You distribute, alongside
|
|
||||||
or as an addendum to the NOTICE text from the Work, provided
|
|
||||||
that such additional attribution notices cannot be construed
|
|
||||||
as modifying the License.
|
|
||||||
|
|
||||||
You may add Your own copyright statement to Your modifications and
|
|
||||||
may provide additional or different license terms and conditions
|
|
||||||
for use, reproduction, or distribution of Your modifications, or
|
|
||||||
for any such Derivative Works as a whole, provided Your use,
|
|
||||||
reproduction, and distribution of the Work otherwise complies with
|
|
||||||
the conditions stated in this License.
|
|
||||||
|
|
||||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
|
||||||
any Contribution intentionally submitted for inclusion in the Work
|
|
||||||
by You to the Licensor shall be under the terms and conditions of
|
|
||||||
this License, without any additional terms or conditions.
|
|
||||||
Notwithstanding the above, nothing herein shall supersede or modify
|
|
||||||
the terms of any separate license agreement you may have executed
|
|
||||||
with Licensor regarding such Contributions.
|
|
||||||
|
|
||||||
6. Trademarks. This License does not grant permission to use the trade
|
|
||||||
names, trademarks, service marks, or product names of the Licensor,
|
|
||||||
except as required for reasonable and customary use in describing the
|
|
||||||
origin of the Work and reproducing the content of the NOTICE file.
|
|
||||||
|
|
||||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
|
||||||
agreed to in writing, Licensor provides the Work (and each
|
|
||||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
|
||||||
implied, including, without limitation, any warranties or conditions
|
|
||||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
|
||||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
|
||||||
appropriateness of using or redistributing the Work and assume any
|
|
||||||
risks associated with Your exercise of permissions under this License.
|
|
||||||
|
|
||||||
8. Limitation of Liability. In no event and under no legal theory,
|
|
||||||
whether in tort (including negligence), contract, or otherwise,
|
|
||||||
unless required by applicable law (such as deliberate and grossly
|
|
||||||
negligent acts) or agreed to in writing, shall any Contributor be
|
|
||||||
liable to You for damages, including any direct, indirect, special,
|
|
||||||
incidental, or consequential damages of any character arising as a
|
|
||||||
result of this License or out of the use or inability to use the
|
|
||||||
Work (including but not limited to damages for loss of goodwill,
|
|
||||||
work stoppage, computer failure or malfunction, or any and all
|
|
||||||
other commercial damages or losses), even if such Contributor
|
|
||||||
has been advised of the possibility of such damages.
|
|
||||||
|
|
||||||
9. Accepting Warranty or Additional Liability. While redistributing
|
|
||||||
the Work or Derivative Works thereof, You may choose to offer,
|
|
||||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
|
||||||
or other liability obligations and/or rights consistent with this
|
|
||||||
License. However, in accepting such obligations, You may act only
|
|
||||||
on Your own behalf and on Your sole responsibility, not on behalf
|
|
||||||
of any other Contributor, and only if You agree to indemnify,
|
|
||||||
defend, and hold each Contributor harmless for any liability
|
|
||||||
incurred by, or claims asserted against, such Contributor by reason
|
|
||||||
of your accepting any such warranty or additional liability.
|
|
||||||
|
|
||||||
END OF TERMS AND CONDITIONS
|
|
||||||
|
|
||||||
APPENDIX: How to apply the Apache License to your work.
|
|
||||||
|
|
||||||
To apply the Apache License to your work, attach the following
|
|
||||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
|
||||||
replaced with your own identifying information. (Don't include
|
|
||||||
the brackets!) The text should be enclosed in the appropriate
|
|
||||||
comment syntax for the file format. We also recommend that a
|
|
||||||
file or class name and description of purpose be included on the
|
|
||||||
same "printed page" as the copyright notice for easier
|
|
||||||
identification within third-party archives.
|
|
||||||
|
|
||||||
Copyright [yyyy] [name of copyright owner]
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
2
vendor/github.com/google/shlex/README
generated
vendored
2
vendor/github.com/google/shlex/README
generated
vendored
@ -1,2 +0,0 @@
|
|||||||
go-shlex is a simple lexer for go that supports shell-style quoting,
|
|
||||||
commenting, and escaping.
|
|
416
vendor/github.com/google/shlex/shlex.go
generated
vendored
416
vendor/github.com/google/shlex/shlex.go
generated
vendored
@ -1,416 +0,0 @@
|
|||||||
/*
|
|
||||||
Copyright 2012 Google Inc. All Rights Reserved.
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
/*
|
|
||||||
Package shlex implements a simple lexer which splits input in to tokens using
|
|
||||||
shell-style rules for quoting and commenting.
|
|
||||||
|
|
||||||
The basic use case uses the default ASCII lexer to split a string into sub-strings:
|
|
||||||
|
|
||||||
shlex.Split("one \"two three\" four") -> []string{"one", "two three", "four"}
|
|
||||||
|
|
||||||
To process a stream of strings:
|
|
||||||
|
|
||||||
l := NewLexer(os.Stdin)
|
|
||||||
for ; token, err := l.Next(); err != nil {
|
|
||||||
// process token
|
|
||||||
}
|
|
||||||
|
|
||||||
To access the raw token stream (which includes tokens for comments):
|
|
||||||
|
|
||||||
t := NewTokenizer(os.Stdin)
|
|
||||||
for ; token, err := t.Next(); err != nil {
|
|
||||||
// process token
|
|
||||||
}
|
|
||||||
|
|
||||||
*/
|
|
||||||
package shlex
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bufio"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// TokenType is a top-level token classification: A word, space, comment, unknown.
|
|
||||||
type TokenType int
|
|
||||||
|
|
||||||
// runeTokenClass is the type of a UTF-8 character classification: A quote, space, escape.
|
|
||||||
type runeTokenClass int
|
|
||||||
|
|
||||||
// the internal state used by the lexer state machine
|
|
||||||
type lexerState int
|
|
||||||
|
|
||||||
// Token is a (type, value) pair representing a lexographical token.
|
|
||||||
type Token struct {
|
|
||||||
tokenType TokenType
|
|
||||||
value string
|
|
||||||
}
|
|
||||||
|
|
||||||
// Equal reports whether tokens a, and b, are equal.
|
|
||||||
// Two tokens are equal if both their types and values are equal. A nil token can
|
|
||||||
// never be equal to another token.
|
|
||||||
func (a *Token) Equal(b *Token) bool {
|
|
||||||
if a == nil || b == nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if a.tokenType != b.tokenType {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return a.value == b.value
|
|
||||||
}
|
|
||||||
|
|
||||||
// Named classes of UTF-8 runes
|
|
||||||
const (
|
|
||||||
spaceRunes = " \t\r\n"
|
|
||||||
escapingQuoteRunes = `"`
|
|
||||||
nonEscapingQuoteRunes = "'"
|
|
||||||
escapeRunes = `\`
|
|
||||||
commentRunes = "#"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Classes of rune token
|
|
||||||
const (
|
|
||||||
unknownRuneClass runeTokenClass = iota
|
|
||||||
spaceRuneClass
|
|
||||||
escapingQuoteRuneClass
|
|
||||||
nonEscapingQuoteRuneClass
|
|
||||||
escapeRuneClass
|
|
||||||
commentRuneClass
|
|
||||||
eofRuneClass
|
|
||||||
)
|
|
||||||
|
|
||||||
// Classes of lexographic token
|
|
||||||
const (
|
|
||||||
UnknownToken TokenType = iota
|
|
||||||
WordToken
|
|
||||||
SpaceToken
|
|
||||||
CommentToken
|
|
||||||
)
|
|
||||||
|
|
||||||
// Lexer state machine states
|
|
||||||
const (
|
|
||||||
startState lexerState = iota // no runes have been seen
|
|
||||||
inWordState // processing regular runes in a word
|
|
||||||
escapingState // we have just consumed an escape rune; the next rune is literal
|
|
||||||
escapingQuotedState // we have just consumed an escape rune within a quoted string
|
|
||||||
quotingEscapingState // we are within a quoted string that supports escaping ("...")
|
|
||||||
quotingState // we are within a string that does not support escaping ('...')
|
|
||||||
commentState // we are within a comment (everything following an unquoted or unescaped #
|
|
||||||
)
|
|
||||||
|
|
||||||
// tokenClassifier is used for classifying rune characters.
|
|
||||||
type tokenClassifier map[rune]runeTokenClass
|
|
||||||
|
|
||||||
func (typeMap tokenClassifier) addRuneClass(runes string, tokenType runeTokenClass) {
|
|
||||||
for _, runeChar := range runes {
|
|
||||||
typeMap[runeChar] = tokenType
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// newDefaultClassifier creates a new classifier for ASCII characters.
|
|
||||||
func newDefaultClassifier() tokenClassifier {
|
|
||||||
t := tokenClassifier{}
|
|
||||||
t.addRuneClass(spaceRunes, spaceRuneClass)
|
|
||||||
t.addRuneClass(escapingQuoteRunes, escapingQuoteRuneClass)
|
|
||||||
t.addRuneClass(nonEscapingQuoteRunes, nonEscapingQuoteRuneClass)
|
|
||||||
t.addRuneClass(escapeRunes, escapeRuneClass)
|
|
||||||
t.addRuneClass(commentRunes, commentRuneClass)
|
|
||||||
return t
|
|
||||||
}
|
|
||||||
|
|
||||||
// ClassifyRune classifiees a rune
|
|
||||||
func (t tokenClassifier) ClassifyRune(runeVal rune) runeTokenClass {
|
|
||||||
return t[runeVal]
|
|
||||||
}
|
|
||||||
|
|
||||||
// Lexer turns an input stream into a sequence of tokens. Whitespace and comments are skipped.
|
|
||||||
type Lexer Tokenizer
|
|
||||||
|
|
||||||
// NewLexer creates a new lexer from an input stream.
|
|
||||||
func NewLexer(r io.Reader) *Lexer {
|
|
||||||
|
|
||||||
return (*Lexer)(NewTokenizer(r))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Next returns the next word, or an error. If there are no more words,
|
|
||||||
// the error will be io.EOF.
|
|
||||||
func (l *Lexer) Next() (string, error) {
|
|
||||||
for {
|
|
||||||
token, err := (*Tokenizer)(l).Next()
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
switch token.tokenType {
|
|
||||||
case WordToken:
|
|
||||||
return token.value, nil
|
|
||||||
case CommentToken:
|
|
||||||
// skip comments
|
|
||||||
default:
|
|
||||||
return "", fmt.Errorf("Unknown token type: %v", token.tokenType)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Tokenizer turns an input stream into a sequence of typed tokens
|
|
||||||
type Tokenizer struct {
|
|
||||||
input bufio.Reader
|
|
||||||
classifier tokenClassifier
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewTokenizer creates a new tokenizer from an input stream.
|
|
||||||
func NewTokenizer(r io.Reader) *Tokenizer {
|
|
||||||
input := bufio.NewReader(r)
|
|
||||||
classifier := newDefaultClassifier()
|
|
||||||
return &Tokenizer{
|
|
||||||
input: *input,
|
|
||||||
classifier: classifier}
|
|
||||||
}
|
|
||||||
|
|
||||||
// scanStream scans the stream for the next token using the internal state machine.
|
|
||||||
// It will panic if it encounters a rune which it does not know how to handle.
|
|
||||||
func (t *Tokenizer) scanStream() (*Token, error) {
|
|
||||||
state := startState
|
|
||||||
var tokenType TokenType
|
|
||||||
var value []rune
|
|
||||||
var nextRune rune
|
|
||||||
var nextRuneType runeTokenClass
|
|
||||||
var err error
|
|
||||||
|
|
||||||
for {
|
|
||||||
nextRune, _, err = t.input.ReadRune()
|
|
||||||
nextRuneType = t.classifier.ClassifyRune(nextRune)
|
|
||||||
|
|
||||||
if err == io.EOF {
|
|
||||||
nextRuneType = eofRuneClass
|
|
||||||
err = nil
|
|
||||||
} else if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
switch state {
|
|
||||||
case startState: // no runes read yet
|
|
||||||
{
|
|
||||||
switch nextRuneType {
|
|
||||||
case eofRuneClass:
|
|
||||||
{
|
|
||||||
return nil, io.EOF
|
|
||||||
}
|
|
||||||
case spaceRuneClass:
|
|
||||||
{
|
|
||||||
}
|
|
||||||
case escapingQuoteRuneClass:
|
|
||||||
{
|
|
||||||
tokenType = WordToken
|
|
||||||
state = quotingEscapingState
|
|
||||||
}
|
|
||||||
case nonEscapingQuoteRuneClass:
|
|
||||||
{
|
|
||||||
tokenType = WordToken
|
|
||||||
state = quotingState
|
|
||||||
}
|
|
||||||
case escapeRuneClass:
|
|
||||||
{
|
|
||||||
tokenType = WordToken
|
|
||||||
state = escapingState
|
|
||||||
}
|
|
||||||
case commentRuneClass:
|
|
||||||
{
|
|
||||||
tokenType = CommentToken
|
|
||||||
state = commentState
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
{
|
|
||||||
tokenType = WordToken
|
|
||||||
value = append(value, nextRune)
|
|
||||||
state = inWordState
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case inWordState: // in a regular word
|
|
||||||
{
|
|
||||||
switch nextRuneType {
|
|
||||||
case eofRuneClass:
|
|
||||||
{
|
|
||||||
token := &Token{
|
|
||||||
tokenType: tokenType,
|
|
||||||
value: string(value)}
|
|
||||||
return token, err
|
|
||||||
}
|
|
||||||
case spaceRuneClass:
|
|
||||||
{
|
|
||||||
token := &Token{
|
|
||||||
tokenType: tokenType,
|
|
||||||
value: string(value)}
|
|
||||||
return token, err
|
|
||||||
}
|
|
||||||
case escapingQuoteRuneClass:
|
|
||||||
{
|
|
||||||
state = quotingEscapingState
|
|
||||||
}
|
|
||||||
case nonEscapingQuoteRuneClass:
|
|
||||||
{
|
|
||||||
state = quotingState
|
|
||||||
}
|
|
||||||
case escapeRuneClass:
|
|
||||||
{
|
|
||||||
state = escapingState
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
{
|
|
||||||
value = append(value, nextRune)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case escapingState: // the rune after an escape character
|
|
||||||
{
|
|
||||||
switch nextRuneType {
|
|
||||||
case eofRuneClass:
|
|
||||||
{
|
|
||||||
err = fmt.Errorf("EOF found after escape character")
|
|
||||||
token := &Token{
|
|
||||||
tokenType: tokenType,
|
|
||||||
value: string(value)}
|
|
||||||
return token, err
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
{
|
|
||||||
state = inWordState
|
|
||||||
value = append(value, nextRune)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case escapingQuotedState: // the next rune after an escape character, in double quotes
|
|
||||||
{
|
|
||||||
switch nextRuneType {
|
|
||||||
case eofRuneClass:
|
|
||||||
{
|
|
||||||
err = fmt.Errorf("EOF found after escape character")
|
|
||||||
token := &Token{
|
|
||||||
tokenType: tokenType,
|
|
||||||
value: string(value)}
|
|
||||||
return token, err
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
{
|
|
||||||
state = quotingEscapingState
|
|
||||||
value = append(value, nextRune)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case quotingEscapingState: // in escaping double quotes
|
|
||||||
{
|
|
||||||
switch nextRuneType {
|
|
||||||
case eofRuneClass:
|
|
||||||
{
|
|
||||||
err = fmt.Errorf("EOF found when expecting closing quote")
|
|
||||||
token := &Token{
|
|
||||||
tokenType: tokenType,
|
|
||||||
value: string(value)}
|
|
||||||
return token, err
|
|
||||||
}
|
|
||||||
case escapingQuoteRuneClass:
|
|
||||||
{
|
|
||||||
state = inWordState
|
|
||||||
}
|
|
||||||
case escapeRuneClass:
|
|
||||||
{
|
|
||||||
state = escapingQuotedState
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
{
|
|
||||||
value = append(value, nextRune)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case quotingState: // in non-escaping single quotes
|
|
||||||
{
|
|
||||||
switch nextRuneType {
|
|
||||||
case eofRuneClass:
|
|
||||||
{
|
|
||||||
err = fmt.Errorf("EOF found when expecting closing quote")
|
|
||||||
token := &Token{
|
|
||||||
tokenType: tokenType,
|
|
||||||
value: string(value)}
|
|
||||||
return token, err
|
|
||||||
}
|
|
||||||
case nonEscapingQuoteRuneClass:
|
|
||||||
{
|
|
||||||
state = inWordState
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
{
|
|
||||||
value = append(value, nextRune)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case commentState: // in a comment
|
|
||||||
{
|
|
||||||
switch nextRuneType {
|
|
||||||
case eofRuneClass:
|
|
||||||
{
|
|
||||||
token := &Token{
|
|
||||||
tokenType: tokenType,
|
|
||||||
value: string(value)}
|
|
||||||
return token, err
|
|
||||||
}
|
|
||||||
case spaceRuneClass:
|
|
||||||
{
|
|
||||||
if nextRune == '\n' {
|
|
||||||
state = startState
|
|
||||||
token := &Token{
|
|
||||||
tokenType: tokenType,
|
|
||||||
value: string(value)}
|
|
||||||
return token, err
|
|
||||||
} else {
|
|
||||||
value = append(value, nextRune)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
{
|
|
||||||
value = append(value, nextRune)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
{
|
|
||||||
return nil, fmt.Errorf("Unexpected state: %v", state)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Next returns the next token in the stream.
|
|
||||||
func (t *Tokenizer) Next() (*Token, error) {
|
|
||||||
return t.scanStream()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Split partitions a string into a slice of strings.
|
|
||||||
func Split(s string) ([]string, error) {
|
|
||||||
l := NewLexer(strings.NewReader(s))
|
|
||||||
subStrings := make([]string, 0)
|
|
||||||
for {
|
|
||||||
word, err := l.Next()
|
|
||||||
if err != nil {
|
|
||||||
if err == io.EOF {
|
|
||||||
return subStrings, nil
|
|
||||||
}
|
|
||||||
return subStrings, err
|
|
||||||
}
|
|
||||||
subStrings = append(subStrings, word)
|
|
||||||
}
|
|
||||||
}
|
|
9
vendor/github.com/google/uuid/.travis.yml
generated
vendored
9
vendor/github.com/google/uuid/.travis.yml
generated
vendored
@ -1,9 +0,0 @@
|
|||||||
language: go
|
|
||||||
|
|
||||||
go:
|
|
||||||
- 1.4.3
|
|
||||||
- 1.5.3
|
|
||||||
- tip
|
|
||||||
|
|
||||||
script:
|
|
||||||
- go test -v ./...
|
|
10
vendor/github.com/google/uuid/CONTRIBUTING.md
generated
vendored
10
vendor/github.com/google/uuid/CONTRIBUTING.md
generated
vendored
@ -1,10 +0,0 @@
|
|||||||
# How to contribute
|
|
||||||
|
|
||||||
We definitely welcome patches and contribution to this project!
|
|
||||||
|
|
||||||
### Legal requirements
|
|
||||||
|
|
||||||
In order to protect both you and ourselves, you will need to sign the
|
|
||||||
[Contributor License Agreement](https://cla.developers.google.com/clas).
|
|
||||||
|
|
||||||
You may have already signed it for other Google projects.
|
|
9
vendor/github.com/google/uuid/CONTRIBUTORS
generated
vendored
9
vendor/github.com/google/uuid/CONTRIBUTORS
generated
vendored
@ -1,9 +0,0 @@
|
|||||||
Paul Borman <borman@google.com>
|
|
||||||
bmatsuo
|
|
||||||
shawnps
|
|
||||||
theory
|
|
||||||
jboverfelt
|
|
||||||
dsymonds
|
|
||||||
cd1
|
|
||||||
wallclockbuilder
|
|
||||||
dansouza
|
|
27
vendor/github.com/google/uuid/LICENSE
generated
vendored
27
vendor/github.com/google/uuid/LICENSE
generated
vendored
@ -1,27 +0,0 @@
|
|||||||
Copyright (c) 2009,2014 Google Inc. All rights reserved.
|
|
||||||
|
|
||||||
Redistribution and use in source and binary forms, with or without
|
|
||||||
modification, are permitted provided that the following conditions are
|
|
||||||
met:
|
|
||||||
|
|
||||||
* Redistributions of source code must retain the above copyright
|
|
||||||
notice, this list of conditions and the following disclaimer.
|
|
||||||
* Redistributions in binary form must reproduce the above
|
|
||||||
copyright notice, this list of conditions and the following disclaimer
|
|
||||||
in the documentation and/or other materials provided with the
|
|
||||||
distribution.
|
|
||||||
* Neither the name of Google Inc. nor the names of its
|
|
||||||
contributors may be used to endorse or promote products derived from
|
|
||||||
this software without specific prior written permission.
|
|
||||||
|
|
||||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
||||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
||||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|
||||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
||||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
||||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|
||||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
||||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
||||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
||||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
||||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
19
vendor/github.com/google/uuid/README.md
generated
vendored
19
vendor/github.com/google/uuid/README.md
generated
vendored
@ -1,19 +0,0 @@
|
|||||||
# uuid 
|
|
||||||
The uuid package generates and inspects UUIDs based on
|
|
||||||
[RFC 4122](http://tools.ietf.org/html/rfc4122)
|
|
||||||
and DCE 1.1: Authentication and Security Services.
|
|
||||||
|
|
||||||
This package is based on the github.com/pborman/uuid package (previously named
|
|
||||||
code.google.com/p/go-uuid). It differs from these earlier packages in that
|
|
||||||
a UUID is a 16 byte array rather than a byte slice. One loss due to this
|
|
||||||
change is the ability to represent an invalid UUID (vs a NIL UUID).
|
|
||||||
|
|
||||||
###### Install
|
|
||||||
`go get github.com/google/uuid`
|
|
||||||
|
|
||||||
###### Documentation
|
|
||||||
[](http://godoc.org/github.com/google/uuid)
|
|
||||||
|
|
||||||
Full `go doc` style documentation for the package can be viewed online without
|
|
||||||
installing this package by using the GoDoc site here:
|
|
||||||
http://pkg.go.dev/github.com/google/uuid
|
|
80
vendor/github.com/google/uuid/dce.go
generated
vendored
80
vendor/github.com/google/uuid/dce.go
generated
vendored
@ -1,80 +0,0 @@
|
|||||||
// Copyright 2016 Google Inc. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package uuid
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/binary"
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
)
|
|
||||||
|
|
||||||
// A Domain represents a Version 2 domain
|
|
||||||
type Domain byte
|
|
||||||
|
|
||||||
// Domain constants for DCE Security (Version 2) UUIDs.
|
|
||||||
const (
|
|
||||||
Person = Domain(0)
|
|
||||||
Group = Domain(1)
|
|
||||||
Org = Domain(2)
|
|
||||||
)
|
|
||||||
|
|
||||||
// NewDCESecurity returns a DCE Security (Version 2) UUID.
|
|
||||||
//
|
|
||||||
// The domain should be one of Person, Group or Org.
|
|
||||||
// On a POSIX system the id should be the users UID for the Person
|
|
||||||
// domain and the users GID for the Group. The meaning of id for
|
|
||||||
// the domain Org or on non-POSIX systems is site defined.
|
|
||||||
//
|
|
||||||
// For a given domain/id pair the same token may be returned for up to
|
|
||||||
// 7 minutes and 10 seconds.
|
|
||||||
func NewDCESecurity(domain Domain, id uint32) (UUID, error) {
|
|
||||||
uuid, err := NewUUID()
|
|
||||||
if err == nil {
|
|
||||||
uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2
|
|
||||||
uuid[9] = byte(domain)
|
|
||||||
binary.BigEndian.PutUint32(uuid[0:], id)
|
|
||||||
}
|
|
||||||
return uuid, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewDCEPerson returns a DCE Security (Version 2) UUID in the person
|
|
||||||
// domain with the id returned by os.Getuid.
|
|
||||||
//
|
|
||||||
// NewDCESecurity(Person, uint32(os.Getuid()))
|
|
||||||
func NewDCEPerson() (UUID, error) {
|
|
||||||
return NewDCESecurity(Person, uint32(os.Getuid()))
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewDCEGroup returns a DCE Security (Version 2) UUID in the group
|
|
||||||
// domain with the id returned by os.Getgid.
|
|
||||||
//
|
|
||||||
// NewDCESecurity(Group, uint32(os.Getgid()))
|
|
||||||
func NewDCEGroup() (UUID, error) {
|
|
||||||
return NewDCESecurity(Group, uint32(os.Getgid()))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Domain returns the domain for a Version 2 UUID. Domains are only defined
|
|
||||||
// for Version 2 UUIDs.
|
|
||||||
func (uuid UUID) Domain() Domain {
|
|
||||||
return Domain(uuid[9])
|
|
||||||
}
|
|
||||||
|
|
||||||
// ID returns the id for a Version 2 UUID. IDs are only defined for Version 2
|
|
||||||
// UUIDs.
|
|
||||||
func (uuid UUID) ID() uint32 {
|
|
||||||
return binary.BigEndian.Uint32(uuid[0:4])
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d Domain) String() string {
|
|
||||||
switch d {
|
|
||||||
case Person:
|
|
||||||
return "Person"
|
|
||||||
case Group:
|
|
||||||
return "Group"
|
|
||||||
case Org:
|
|
||||||
return "Org"
|
|
||||||
}
|
|
||||||
return fmt.Sprintf("Domain%d", int(d))
|
|
||||||
}
|
|
12
vendor/github.com/google/uuid/doc.go
generated
vendored
12
vendor/github.com/google/uuid/doc.go
generated
vendored
@ -1,12 +0,0 @@
|
|||||||
// Copyright 2016 Google Inc. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// Package uuid generates and inspects UUIDs.
|
|
||||||
//
|
|
||||||
// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security
|
|
||||||
// Services.
|
|
||||||
//
|
|
||||||
// A UUID is a 16 byte (128 bit) array. UUIDs may be used as keys to
|
|
||||||
// maps or compared directly.
|
|
||||||
package uuid
|
|
53
vendor/github.com/google/uuid/hash.go
generated
vendored
53
vendor/github.com/google/uuid/hash.go
generated
vendored
@ -1,53 +0,0 @@
|
|||||||
// Copyright 2016 Google Inc. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package uuid
|
|
||||||
|
|
||||||
import (
|
|
||||||
"crypto/md5"
|
|
||||||
"crypto/sha1"
|
|
||||||
"hash"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Well known namespace IDs and UUIDs
|
|
||||||
var (
|
|
||||||
NameSpaceDNS = Must(Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8"))
|
|
||||||
NameSpaceURL = Must(Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8"))
|
|
||||||
NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8"))
|
|
||||||
NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8"))
|
|
||||||
Nil UUID // empty UUID, all zeros
|
|
||||||
)
|
|
||||||
|
|
||||||
// NewHash returns a new UUID derived from the hash of space concatenated with
|
|
||||||
// data generated by h. The hash should be at least 16 byte in length. The
|
|
||||||
// first 16 bytes of the hash are used to form the UUID. The version of the
|
|
||||||
// UUID will be the lower 4 bits of version. NewHash is used to implement
|
|
||||||
// NewMD5 and NewSHA1.
|
|
||||||
func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID {
|
|
||||||
h.Reset()
|
|
||||||
h.Write(space[:]) //nolint:errcheck
|
|
||||||
h.Write(data) //nolint:errcheck
|
|
||||||
s := h.Sum(nil)
|
|
||||||
var uuid UUID
|
|
||||||
copy(uuid[:], s)
|
|
||||||
uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4)
|
|
||||||
uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant
|
|
||||||
return uuid
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewMD5 returns a new MD5 (Version 3) UUID based on the
|
|
||||||
// supplied name space and data. It is the same as calling:
|
|
||||||
//
|
|
||||||
// NewHash(md5.New(), space, data, 3)
|
|
||||||
func NewMD5(space UUID, data []byte) UUID {
|
|
||||||
return NewHash(md5.New(), space, data, 3)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewSHA1 returns a new SHA1 (Version 5) UUID based on the
|
|
||||||
// supplied name space and data. It is the same as calling:
|
|
||||||
//
|
|
||||||
// NewHash(sha1.New(), space, data, 5)
|
|
||||||
func NewSHA1(space UUID, data []byte) UUID {
|
|
||||||
return NewHash(sha1.New(), space, data, 5)
|
|
||||||
}
|
|
38
vendor/github.com/google/uuid/marshal.go
generated
vendored
38
vendor/github.com/google/uuid/marshal.go
generated
vendored
@ -1,38 +0,0 @@
|
|||||||
// Copyright 2016 Google Inc. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package uuid
|
|
||||||
|
|
||||||
import "fmt"
|
|
||||||
|
|
||||||
// MarshalText implements encoding.TextMarshaler.
|
|
||||||
func (uuid UUID) MarshalText() ([]byte, error) {
|
|
||||||
var js [36]byte
|
|
||||||
encodeHex(js[:], uuid)
|
|
||||||
return js[:], nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// UnmarshalText implements encoding.TextUnmarshaler.
|
|
||||||
func (uuid *UUID) UnmarshalText(data []byte) error {
|
|
||||||
id, err := ParseBytes(data)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
*uuid = id
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// MarshalBinary implements encoding.BinaryMarshaler.
|
|
||||||
func (uuid UUID) MarshalBinary() ([]byte, error) {
|
|
||||||
return uuid[:], nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// UnmarshalBinary implements encoding.BinaryUnmarshaler.
|
|
||||||
func (uuid *UUID) UnmarshalBinary(data []byte) error {
|
|
||||||
if len(data) != 16 {
|
|
||||||
return fmt.Errorf("invalid UUID (got %d bytes)", len(data))
|
|
||||||
}
|
|
||||||
copy(uuid[:], data)
|
|
||||||
return nil
|
|
||||||
}
|
|
90
vendor/github.com/google/uuid/node.go
generated
vendored
90
vendor/github.com/google/uuid/node.go
generated
vendored
@ -1,90 +0,0 @@
|
|||||||
// Copyright 2016 Google Inc. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package uuid
|
|
||||||
|
|
||||||
import (
|
|
||||||
"sync"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
nodeMu sync.Mutex
|
|
||||||
ifname string // name of interface being used
|
|
||||||
nodeID [6]byte // hardware for version 1 UUIDs
|
|
||||||
zeroID [6]byte // nodeID with only 0's
|
|
||||||
)
|
|
||||||
|
|
||||||
// NodeInterface returns the name of the interface from which the NodeID was
|
|
||||||
// derived. The interface "user" is returned if the NodeID was set by
|
|
||||||
// SetNodeID.
|
|
||||||
func NodeInterface() string {
|
|
||||||
defer nodeMu.Unlock()
|
|
||||||
nodeMu.Lock()
|
|
||||||
return ifname
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs.
|
|
||||||
// If name is "" then the first usable interface found will be used or a random
|
|
||||||
// Node ID will be generated. If a named interface cannot be found then false
|
|
||||||
// is returned.
|
|
||||||
//
|
|
||||||
// SetNodeInterface never fails when name is "".
|
|
||||||
func SetNodeInterface(name string) bool {
|
|
||||||
defer nodeMu.Unlock()
|
|
||||||
nodeMu.Lock()
|
|
||||||
return setNodeInterface(name)
|
|
||||||
}
|
|
||||||
|
|
||||||
func setNodeInterface(name string) bool {
|
|
||||||
iname, addr := getHardwareInterface(name) // null implementation for js
|
|
||||||
if iname != "" && addr != nil {
|
|
||||||
ifname = iname
|
|
||||||
copy(nodeID[:], addr)
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// We found no interfaces with a valid hardware address. If name
|
|
||||||
// does not specify a specific interface generate a random Node ID
|
|
||||||
// (section 4.1.6)
|
|
||||||
if name == "" {
|
|
||||||
ifname = "random"
|
|
||||||
randomBits(nodeID[:])
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// NodeID returns a slice of a copy of the current Node ID, setting the Node ID
|
|
||||||
// if not already set.
|
|
||||||
func NodeID() []byte {
|
|
||||||
defer nodeMu.Unlock()
|
|
||||||
nodeMu.Lock()
|
|
||||||
if nodeID == zeroID {
|
|
||||||
setNodeInterface("")
|
|
||||||
}
|
|
||||||
nid := nodeID
|
|
||||||
return nid[:]
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes
|
|
||||||
// of id are used. If id is less than 6 bytes then false is returned and the
|
|
||||||
// Node ID is not set.
|
|
||||||
func SetNodeID(id []byte) bool {
|
|
||||||
if len(id) < 6 {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
defer nodeMu.Unlock()
|
|
||||||
nodeMu.Lock()
|
|
||||||
copy(nodeID[:], id)
|
|
||||||
ifname = "user"
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is
|
|
||||||
// not valid. The NodeID is only well defined for version 1 and 2 UUIDs.
|
|
||||||
func (uuid UUID) NodeID() []byte {
|
|
||||||
var node [6]byte
|
|
||||||
copy(node[:], uuid[10:])
|
|
||||||
return node[:]
|
|
||||||
}
|
|
12
vendor/github.com/google/uuid/node_js.go
generated
vendored
12
vendor/github.com/google/uuid/node_js.go
generated
vendored
@ -1,12 +0,0 @@
|
|||||||
// Copyright 2017 Google Inc. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// +build js
|
|
||||||
|
|
||||||
package uuid
|
|
||||||
|
|
||||||
// getHardwareInterface returns nil values for the JS version of the code.
|
|
||||||
// This remvoves the "net" dependency, because it is not used in the browser.
|
|
||||||
// Using the "net" library inflates the size of the transpiled JS code by 673k bytes.
|
|
||||||
func getHardwareInterface(name string) (string, []byte) { return "", nil }
|
|
33
vendor/github.com/google/uuid/node_net.go
generated
vendored
33
vendor/github.com/google/uuid/node_net.go
generated
vendored
@ -1,33 +0,0 @@
|
|||||||
// Copyright 2017 Google Inc. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// +build !js
|
|
||||||
|
|
||||||
package uuid
|
|
||||||
|
|
||||||
import "net"
|
|
||||||
|
|
||||||
var interfaces []net.Interface // cached list of interfaces
|
|
||||||
|
|
||||||
// getHardwareInterface returns the name and hardware address of interface name.
|
|
||||||
// If name is "" then the name and hardware address of one of the system's
|
|
||||||
// interfaces is returned. If no interfaces are found (name does not exist or
|
|
||||||
// there are no interfaces) then "", nil is returned.
|
|
||||||
//
|
|
||||||
// Only addresses of at least 6 bytes are returned.
|
|
||||||
func getHardwareInterface(name string) (string, []byte) {
|
|
||||||
if interfaces == nil {
|
|
||||||
var err error
|
|
||||||
interfaces, err = net.Interfaces()
|
|
||||||
if err != nil {
|
|
||||||
return "", nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for _, ifs := range interfaces {
|
|
||||||
if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) {
|
|
||||||
return ifs.Name, ifs.HardwareAddr
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return "", nil
|
|
||||||
}
|
|
118
vendor/github.com/google/uuid/null.go
generated
vendored
118
vendor/github.com/google/uuid/null.go
generated
vendored
@ -1,118 +0,0 @@
|
|||||||
// Copyright 2021 Google Inc. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package uuid
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"database/sql/driver"
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
)
|
|
||||||
|
|
||||||
var jsonNull = []byte("null")
|
|
||||||
|
|
||||||
// NullUUID represents a UUID that may be null.
|
|
||||||
// NullUUID implements the SQL driver.Scanner interface so
|
|
||||||
// it can be used as a scan destination:
|
|
||||||
//
|
|
||||||
// var u uuid.NullUUID
|
|
||||||
// err := db.QueryRow("SELECT name FROM foo WHERE id=?", id).Scan(&u)
|
|
||||||
// ...
|
|
||||||
// if u.Valid {
|
|
||||||
// // use u.UUID
|
|
||||||
// } else {
|
|
||||||
// // NULL value
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
type NullUUID struct {
|
|
||||||
UUID UUID
|
|
||||||
Valid bool // Valid is true if UUID is not NULL
|
|
||||||
}
|
|
||||||
|
|
||||||
// Scan implements the SQL driver.Scanner interface.
|
|
||||||
func (nu *NullUUID) Scan(value interface{}) error {
|
|
||||||
if value == nil {
|
|
||||||
nu.UUID, nu.Valid = Nil, false
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
err := nu.UUID.Scan(value)
|
|
||||||
if err != nil {
|
|
||||||
nu.Valid = false
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
nu.Valid = true
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Value implements the driver Valuer interface.
|
|
||||||
func (nu NullUUID) Value() (driver.Value, error) {
|
|
||||||
if !nu.Valid {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
// Delegate to UUID Value function
|
|
||||||
return nu.UUID.Value()
|
|
||||||
}
|
|
||||||
|
|
||||||
// MarshalBinary implements encoding.BinaryMarshaler.
|
|
||||||
func (nu NullUUID) MarshalBinary() ([]byte, error) {
|
|
||||||
if nu.Valid {
|
|
||||||
return nu.UUID[:], nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return []byte(nil), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// UnmarshalBinary implements encoding.BinaryUnmarshaler.
|
|
||||||
func (nu *NullUUID) UnmarshalBinary(data []byte) error {
|
|
||||||
if len(data) != 16 {
|
|
||||||
return fmt.Errorf("invalid UUID (got %d bytes)", len(data))
|
|
||||||
}
|
|
||||||
copy(nu.UUID[:], data)
|
|
||||||
nu.Valid = true
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// MarshalText implements encoding.TextMarshaler.
|
|
||||||
func (nu NullUUID) MarshalText() ([]byte, error) {
|
|
||||||
if nu.Valid {
|
|
||||||
return nu.UUID.MarshalText()
|
|
||||||
}
|
|
||||||
|
|
||||||
return jsonNull, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// UnmarshalText implements encoding.TextUnmarshaler.
|
|
||||||
func (nu *NullUUID) UnmarshalText(data []byte) error {
|
|
||||||
id, err := ParseBytes(data)
|
|
||||||
if err != nil {
|
|
||||||
nu.Valid = false
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
nu.UUID = id
|
|
||||||
nu.Valid = true
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// MarshalJSON implements json.Marshaler.
|
|
||||||
func (nu NullUUID) MarshalJSON() ([]byte, error) {
|
|
||||||
if nu.Valid {
|
|
||||||
return json.Marshal(nu.UUID)
|
|
||||||
}
|
|
||||||
|
|
||||||
return jsonNull, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// UnmarshalJSON implements json.Unmarshaler.
|
|
||||||
func (nu *NullUUID) UnmarshalJSON(data []byte) error {
|
|
||||||
if bytes.Equal(data, jsonNull) {
|
|
||||||
*nu = NullUUID{}
|
|
||||||
return nil // valid null UUID
|
|
||||||
}
|
|
||||||
err := json.Unmarshal(data, &nu.UUID)
|
|
||||||
nu.Valid = err == nil
|
|
||||||
return err
|
|
||||||
}
|
|
59
vendor/github.com/google/uuid/sql.go
generated
vendored
59
vendor/github.com/google/uuid/sql.go
generated
vendored
@ -1,59 +0,0 @@
|
|||||||
// Copyright 2016 Google Inc. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package uuid
|
|
||||||
|
|
||||||
import (
|
|
||||||
"database/sql/driver"
|
|
||||||
"fmt"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Scan implements sql.Scanner so UUIDs can be read from databases transparently.
|
|
||||||
// Currently, database types that map to string and []byte are supported. Please
|
|
||||||
// consult database-specific driver documentation for matching types.
|
|
||||||
func (uuid *UUID) Scan(src interface{}) error {
|
|
||||||
switch src := src.(type) {
|
|
||||||
case nil:
|
|
||||||
return nil
|
|
||||||
|
|
||||||
case string:
|
|
||||||
// if an empty UUID comes from a table, we return a null UUID
|
|
||||||
if src == "" {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// see Parse for required string format
|
|
||||||
u, err := Parse(src)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("Scan: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
*uuid = u
|
|
||||||
|
|
||||||
case []byte:
|
|
||||||
// if an empty UUID comes from a table, we return a null UUID
|
|
||||||
if len(src) == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// assumes a simple slice of bytes if 16 bytes
|
|
||||||
// otherwise attempts to parse
|
|
||||||
if len(src) != 16 {
|
|
||||||
return uuid.Scan(string(src))
|
|
||||||
}
|
|
||||||
copy((*uuid)[:], src)
|
|
||||||
|
|
||||||
default:
|
|
||||||
return fmt.Errorf("Scan: unable to scan type %T into UUID", src)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Value implements sql.Valuer so that UUIDs can be written to databases
|
|
||||||
// transparently. Currently, UUIDs map to strings. Please consult
|
|
||||||
// database-specific driver documentation for matching types.
|
|
||||||
func (uuid UUID) Value() (driver.Value, error) {
|
|
||||||
return uuid.String(), nil
|
|
||||||
}
|
|
123
vendor/github.com/google/uuid/time.go
generated
vendored
123
vendor/github.com/google/uuid/time.go
generated
vendored
@ -1,123 +0,0 @@
|
|||||||
// Copyright 2016 Google Inc. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package uuid
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/binary"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// A Time represents a time as the number of 100's of nanoseconds since 15 Oct
|
|
||||||
// 1582.
|
|
||||||
type Time int64
|
|
||||||
|
|
||||||
const (
|
|
||||||
lillian = 2299160 // Julian day of 15 Oct 1582
|
|
||||||
unix = 2440587 // Julian day of 1 Jan 1970
|
|
||||||
epoch = unix - lillian // Days between epochs
|
|
||||||
g1582 = epoch * 86400 // seconds between epochs
|
|
||||||
g1582ns100 = g1582 * 10000000 // 100s of a nanoseconds between epochs
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
timeMu sync.Mutex
|
|
||||||
lasttime uint64 // last time we returned
|
|
||||||
clockSeq uint16 // clock sequence for this run
|
|
||||||
|
|
||||||
timeNow = time.Now // for testing
|
|
||||||
)
|
|
||||||
|
|
||||||
// UnixTime converts t the number of seconds and nanoseconds using the Unix
|
|
||||||
// epoch of 1 Jan 1970.
|
|
||||||
func (t Time) UnixTime() (sec, nsec int64) {
|
|
||||||
sec = int64(t - g1582ns100)
|
|
||||||
nsec = (sec % 10000000) * 100
|
|
||||||
sec /= 10000000
|
|
||||||
return sec, nsec
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and
|
|
||||||
// clock sequence as well as adjusting the clock sequence as needed. An error
|
|
||||||
// is returned if the current time cannot be determined.
|
|
||||||
func GetTime() (Time, uint16, error) {
|
|
||||||
defer timeMu.Unlock()
|
|
||||||
timeMu.Lock()
|
|
||||||
return getTime()
|
|
||||||
}
|
|
||||||
|
|
||||||
func getTime() (Time, uint16, error) {
|
|
||||||
t := timeNow()
|
|
||||||
|
|
||||||
// If we don't have a clock sequence already, set one.
|
|
||||||
if clockSeq == 0 {
|
|
||||||
setClockSequence(-1)
|
|
||||||
}
|
|
||||||
now := uint64(t.UnixNano()/100) + g1582ns100
|
|
||||||
|
|
||||||
// If time has gone backwards with this clock sequence then we
|
|
||||||
// increment the clock sequence
|
|
||||||
if now <= lasttime {
|
|
||||||
clockSeq = ((clockSeq + 1) & 0x3fff) | 0x8000
|
|
||||||
}
|
|
||||||
lasttime = now
|
|
||||||
return Time(now), clockSeq, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ClockSequence returns the current clock sequence, generating one if not
|
|
||||||
// already set. The clock sequence is only used for Version 1 UUIDs.
|
|
||||||
//
|
|
||||||
// The uuid package does not use global static storage for the clock sequence or
|
|
||||||
// the last time a UUID was generated. Unless SetClockSequence is used, a new
|
|
||||||
// random clock sequence is generated the first time a clock sequence is
|
|
||||||
// requested by ClockSequence, GetTime, or NewUUID. (section 4.2.1.1)
|
|
||||||
func ClockSequence() int {
|
|
||||||
defer timeMu.Unlock()
|
|
||||||
timeMu.Lock()
|
|
||||||
return clockSequence()
|
|
||||||
}
|
|
||||||
|
|
||||||
func clockSequence() int {
|
|
||||||
if clockSeq == 0 {
|
|
||||||
setClockSequence(-1)
|
|
||||||
}
|
|
||||||
return int(clockSeq & 0x3fff)
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetClockSequence sets the clock sequence to the lower 14 bits of seq. Setting to
|
|
||||||
// -1 causes a new sequence to be generated.
|
|
||||||
func SetClockSequence(seq int) {
|
|
||||||
defer timeMu.Unlock()
|
|
||||||
timeMu.Lock()
|
|
||||||
setClockSequence(seq)
|
|
||||||
}
|
|
||||||
|
|
||||||
func setClockSequence(seq int) {
|
|
||||||
if seq == -1 {
|
|
||||||
var b [2]byte
|
|
||||||
randomBits(b[:]) // clock sequence
|
|
||||||
seq = int(b[0])<<8 | int(b[1])
|
|
||||||
}
|
|
||||||
oldSeq := clockSeq
|
|
||||||
clockSeq = uint16(seq&0x3fff) | 0x8000 // Set our variant
|
|
||||||
if oldSeq != clockSeq {
|
|
||||||
lasttime = 0
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in
|
|
||||||
// uuid. The time is only defined for version 1 and 2 UUIDs.
|
|
||||||
func (uuid UUID) Time() Time {
|
|
||||||
time := int64(binary.BigEndian.Uint32(uuid[0:4]))
|
|
||||||
time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32
|
|
||||||
time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48
|
|
||||||
return Time(time)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ClockSequence returns the clock sequence encoded in uuid.
|
|
||||||
// The clock sequence is only well defined for version 1 and 2 UUIDs.
|
|
||||||
func (uuid UUID) ClockSequence() int {
|
|
||||||
return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff
|
|
||||||
}
|
|
43
vendor/github.com/google/uuid/util.go
generated
vendored
43
vendor/github.com/google/uuid/util.go
generated
vendored
@ -1,43 +0,0 @@
|
|||||||
// Copyright 2016 Google Inc. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package uuid
|
|
||||||
|
|
||||||
import (
|
|
||||||
"io"
|
|
||||||
)
|
|
||||||
|
|
||||||
// randomBits completely fills slice b with random data.
|
|
||||||
func randomBits(b []byte) {
|
|
||||||
if _, err := io.ReadFull(rander, b); err != nil {
|
|
||||||
panic(err.Error()) // rand should never fail
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// xvalues returns the value of a byte as a hexadecimal digit or 255.
|
|
||||||
var xvalues = [256]byte{
|
|
||||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
|
||||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
|
||||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
|
||||||
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255,
|
|
||||||
255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
|
||||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
|
||||||
255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
|
||||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
|
||||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
|
||||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
|
||||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
|
||||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
|
||||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
|
||||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
|
||||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
|
||||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
|
||||||
}
|
|
||||||
|
|
||||||
// xtob converts hex characters x1 and x2 into a byte.
|
|
||||||
func xtob(x1, x2 byte) (byte, bool) {
|
|
||||||
b1 := xvalues[x1]
|
|
||||||
b2 := xvalues[x2]
|
|
||||||
return (b1 << 4) | b2, b1 != 255 && b2 != 255
|
|
||||||
}
|
|
294
vendor/github.com/google/uuid/uuid.go
generated
vendored
294
vendor/github.com/google/uuid/uuid.go
generated
vendored
@ -1,294 +0,0 @@
|
|||||||
// Copyright 2018 Google Inc. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package uuid
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"crypto/rand"
|
|
||||||
"encoding/hex"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
)
|
|
||||||
|
|
||||||
// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC
|
|
||||||
// 4122.
|
|
||||||
type UUID [16]byte
|
|
||||||
|
|
||||||
// A Version represents a UUID's version.
|
|
||||||
type Version byte
|
|
||||||
|
|
||||||
// A Variant represents a UUID's variant.
|
|
||||||
type Variant byte
|
|
||||||
|
|
||||||
// Constants returned by Variant.
|
|
||||||
const (
|
|
||||||
Invalid = Variant(iota) // Invalid UUID
|
|
||||||
RFC4122 // The variant specified in RFC4122
|
|
||||||
Reserved // Reserved, NCS backward compatibility.
|
|
||||||
Microsoft // Reserved, Microsoft Corporation backward compatibility.
|
|
||||||
Future // Reserved for future definition.
|
|
||||||
)
|
|
||||||
|
|
||||||
const randPoolSize = 16 * 16
|
|
||||||
|
|
||||||
var (
|
|
||||||
rander = rand.Reader // random function
|
|
||||||
poolEnabled = false
|
|
||||||
poolMu sync.Mutex
|
|
||||||
poolPos = randPoolSize // protected with poolMu
|
|
||||||
pool [randPoolSize]byte // protected with poolMu
|
|
||||||
)
|
|
||||||
|
|
||||||
type invalidLengthError struct{ len int }
|
|
||||||
|
|
||||||
func (err invalidLengthError) Error() string {
|
|
||||||
return fmt.Sprintf("invalid UUID length: %d", err.len)
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsInvalidLengthError is matcher function for custom error invalidLengthError
|
|
||||||
func IsInvalidLengthError(err error) bool {
|
|
||||||
_, ok := err.(invalidLengthError)
|
|
||||||
return ok
|
|
||||||
}
|
|
||||||
|
|
||||||
// Parse decodes s into a UUID or returns an error. Both the standard UUID
|
|
||||||
// forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and
|
|
||||||
// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded as well as the
|
|
||||||
// Microsoft encoding {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} and the raw hex
|
|
||||||
// encoding: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx.
|
|
||||||
func Parse(s string) (UUID, error) {
|
|
||||||
var uuid UUID
|
|
||||||
switch len(s) {
|
|
||||||
// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
|
|
||||||
case 36:
|
|
||||||
|
|
||||||
// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
|
|
||||||
case 36 + 9:
|
|
||||||
if strings.ToLower(s[:9]) != "urn:uuid:" {
|
|
||||||
return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9])
|
|
||||||
}
|
|
||||||
s = s[9:]
|
|
||||||
|
|
||||||
// {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}
|
|
||||||
case 36 + 2:
|
|
||||||
s = s[1:]
|
|
||||||
|
|
||||||
// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
|
|
||||||
case 32:
|
|
||||||
var ok bool
|
|
||||||
for i := range uuid {
|
|
||||||
uuid[i], ok = xtob(s[i*2], s[i*2+1])
|
|
||||||
if !ok {
|
|
||||||
return uuid, errors.New("invalid UUID format")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return uuid, nil
|
|
||||||
default:
|
|
||||||
return uuid, invalidLengthError{len(s)}
|
|
||||||
}
|
|
||||||
// s is now at least 36 bytes long
|
|
||||||
// it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
|
|
||||||
if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
|
|
||||||
return uuid, errors.New("invalid UUID format")
|
|
||||||
}
|
|
||||||
for i, x := range [16]int{
|
|
||||||
0, 2, 4, 6,
|
|
||||||
9, 11,
|
|
||||||
14, 16,
|
|
||||||
19, 21,
|
|
||||||
24, 26, 28, 30, 32, 34} {
|
|
||||||
v, ok := xtob(s[x], s[x+1])
|
|
||||||
if !ok {
|
|
||||||
return uuid, errors.New("invalid UUID format")
|
|
||||||
}
|
|
||||||
uuid[i] = v
|
|
||||||
}
|
|
||||||
return uuid, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ParseBytes is like Parse, except it parses a byte slice instead of a string.
|
|
||||||
func ParseBytes(b []byte) (UUID, error) {
|
|
||||||
var uuid UUID
|
|
||||||
switch len(b) {
|
|
||||||
case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
|
|
||||||
case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
|
|
||||||
if !bytes.Equal(bytes.ToLower(b[:9]), []byte("urn:uuid:")) {
|
|
||||||
return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9])
|
|
||||||
}
|
|
||||||
b = b[9:]
|
|
||||||
case 36 + 2: // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}
|
|
||||||
b = b[1:]
|
|
||||||
case 32: // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
|
|
||||||
var ok bool
|
|
||||||
for i := 0; i < 32; i += 2 {
|
|
||||||
uuid[i/2], ok = xtob(b[i], b[i+1])
|
|
||||||
if !ok {
|
|
||||||
return uuid, errors.New("invalid UUID format")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return uuid, nil
|
|
||||||
default:
|
|
||||||
return uuid, invalidLengthError{len(b)}
|
|
||||||
}
|
|
||||||
// s is now at least 36 bytes long
|
|
||||||
// it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
|
|
||||||
if b[8] != '-' || b[13] != '-' || b[18] != '-' || b[23] != '-' {
|
|
||||||
return uuid, errors.New("invalid UUID format")
|
|
||||||
}
|
|
||||||
for i, x := range [16]int{
|
|
||||||
0, 2, 4, 6,
|
|
||||||
9, 11,
|
|
||||||
14, 16,
|
|
||||||
19, 21,
|
|
||||||
24, 26, 28, 30, 32, 34} {
|
|
||||||
v, ok := xtob(b[x], b[x+1])
|
|
||||||
if !ok {
|
|
||||||
return uuid, errors.New("invalid UUID format")
|
|
||||||
}
|
|
||||||
uuid[i] = v
|
|
||||||
}
|
|
||||||
return uuid, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// MustParse is like Parse but panics if the string cannot be parsed.
|
|
||||||
// It simplifies safe initialization of global variables holding compiled UUIDs.
|
|
||||||
func MustParse(s string) UUID {
|
|
||||||
uuid, err := Parse(s)
|
|
||||||
if err != nil {
|
|
||||||
panic(`uuid: Parse(` + s + `): ` + err.Error())
|
|
||||||
}
|
|
||||||
return uuid
|
|
||||||
}
|
|
||||||
|
|
||||||
// FromBytes creates a new UUID from a byte slice. Returns an error if the slice
|
|
||||||
// does not have a length of 16. The bytes are copied from the slice.
|
|
||||||
func FromBytes(b []byte) (uuid UUID, err error) {
|
|
||||||
err = uuid.UnmarshalBinary(b)
|
|
||||||
return uuid, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Must returns uuid if err is nil and panics otherwise.
|
|
||||||
func Must(uuid UUID, err error) UUID {
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
return uuid
|
|
||||||
}
|
|
||||||
|
|
||||||
// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
|
|
||||||
// , or "" if uuid is invalid.
|
|
||||||
func (uuid UUID) String() string {
|
|
||||||
var buf [36]byte
|
|
||||||
encodeHex(buf[:], uuid)
|
|
||||||
return string(buf[:])
|
|
||||||
}
|
|
||||||
|
|
||||||
// URN returns the RFC 2141 URN form of uuid,
|
|
||||||
// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid.
|
|
||||||
func (uuid UUID) URN() string {
|
|
||||||
var buf [36 + 9]byte
|
|
||||||
copy(buf[:], "urn:uuid:")
|
|
||||||
encodeHex(buf[9:], uuid)
|
|
||||||
return string(buf[:])
|
|
||||||
}
|
|
||||||
|
|
||||||
func encodeHex(dst []byte, uuid UUID) {
|
|
||||||
hex.Encode(dst, uuid[:4])
|
|
||||||
dst[8] = '-'
|
|
||||||
hex.Encode(dst[9:13], uuid[4:6])
|
|
||||||
dst[13] = '-'
|
|
||||||
hex.Encode(dst[14:18], uuid[6:8])
|
|
||||||
dst[18] = '-'
|
|
||||||
hex.Encode(dst[19:23], uuid[8:10])
|
|
||||||
dst[23] = '-'
|
|
||||||
hex.Encode(dst[24:], uuid[10:])
|
|
||||||
}
|
|
||||||
|
|
||||||
// Variant returns the variant encoded in uuid.
|
|
||||||
func (uuid UUID) Variant() Variant {
|
|
||||||
switch {
|
|
||||||
case (uuid[8] & 0xc0) == 0x80:
|
|
||||||
return RFC4122
|
|
||||||
case (uuid[8] & 0xe0) == 0xc0:
|
|
||||||
return Microsoft
|
|
||||||
case (uuid[8] & 0xe0) == 0xe0:
|
|
||||||
return Future
|
|
||||||
default:
|
|
||||||
return Reserved
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Version returns the version of uuid.
|
|
||||||
func (uuid UUID) Version() Version {
|
|
||||||
return Version(uuid[6] >> 4)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (v Version) String() string {
|
|
||||||
if v > 15 {
|
|
||||||
return fmt.Sprintf("BAD_VERSION_%d", v)
|
|
||||||
}
|
|
||||||
return fmt.Sprintf("VERSION_%d", v)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (v Variant) String() string {
|
|
||||||
switch v {
|
|
||||||
case RFC4122:
|
|
||||||
return "RFC4122"
|
|
||||||
case Reserved:
|
|
||||||
return "Reserved"
|
|
||||||
case Microsoft:
|
|
||||||
return "Microsoft"
|
|
||||||
case Future:
|
|
||||||
return "Future"
|
|
||||||
case Invalid:
|
|
||||||
return "Invalid"
|
|
||||||
}
|
|
||||||
return fmt.Sprintf("BadVariant%d", int(v))
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetRand sets the random number generator to r, which implements io.Reader.
|
|
||||||
// If r.Read returns an error when the package requests random data then
|
|
||||||
// a panic will be issued.
|
|
||||||
//
|
|
||||||
// Calling SetRand with nil sets the random number generator to the default
|
|
||||||
// generator.
|
|
||||||
func SetRand(r io.Reader) {
|
|
||||||
if r == nil {
|
|
||||||
rander = rand.Reader
|
|
||||||
return
|
|
||||||
}
|
|
||||||
rander = r
|
|
||||||
}
|
|
||||||
|
|
||||||
// EnableRandPool enables internal randomness pool used for Random
|
|
||||||
// (Version 4) UUID generation. The pool contains random bytes read from
|
|
||||||
// the random number generator on demand in batches. Enabling the pool
|
|
||||||
// may improve the UUID generation throughput significantly.
|
|
||||||
//
|
|
||||||
// Since the pool is stored on the Go heap, this feature may be a bad fit
|
|
||||||
// for security sensitive applications.
|
|
||||||
//
|
|
||||||
// Both EnableRandPool and DisableRandPool are not thread-safe and should
|
|
||||||
// only be called when there is no possibility that New or any other
|
|
||||||
// UUID Version 4 generation function will be called concurrently.
|
|
||||||
func EnableRandPool() {
|
|
||||||
poolEnabled = true
|
|
||||||
}
|
|
||||||
|
|
||||||
// DisableRandPool disables the randomness pool if it was previously
|
|
||||||
// enabled with EnableRandPool.
|
|
||||||
//
|
|
||||||
// Both EnableRandPool and DisableRandPool are not thread-safe and should
|
|
||||||
// only be called when there is no possibility that New or any other
|
|
||||||
// UUID Version 4 generation function will be called concurrently.
|
|
||||||
func DisableRandPool() {
|
|
||||||
poolEnabled = false
|
|
||||||
defer poolMu.Unlock()
|
|
||||||
poolMu.Lock()
|
|
||||||
poolPos = randPoolSize
|
|
||||||
}
|
|
44
vendor/github.com/google/uuid/version1.go
generated
vendored
44
vendor/github.com/google/uuid/version1.go
generated
vendored
@ -1,44 +0,0 @@
|
|||||||
// Copyright 2016 Google Inc. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package uuid
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/binary"
|
|
||||||
)
|
|
||||||
|
|
||||||
// NewUUID returns a Version 1 UUID based on the current NodeID and clock
|
|
||||||
// sequence, and the current time. If the NodeID has not been set by SetNodeID
|
|
||||||
// or SetNodeInterface then it will be set automatically. If the NodeID cannot
|
|
||||||
// be set NewUUID returns nil. If clock sequence has not been set by
|
|
||||||
// SetClockSequence then it will be set automatically. If GetTime fails to
|
|
||||||
// return the current NewUUID returns nil and an error.
|
|
||||||
//
|
|
||||||
// In most cases, New should be used.
|
|
||||||
func NewUUID() (UUID, error) {
|
|
||||||
var uuid UUID
|
|
||||||
now, seq, err := GetTime()
|
|
||||||
if err != nil {
|
|
||||||
return uuid, err
|
|
||||||
}
|
|
||||||
|
|
||||||
timeLow := uint32(now & 0xffffffff)
|
|
||||||
timeMid := uint16((now >> 32) & 0xffff)
|
|
||||||
timeHi := uint16((now >> 48) & 0x0fff)
|
|
||||||
timeHi |= 0x1000 // Version 1
|
|
||||||
|
|
||||||
binary.BigEndian.PutUint32(uuid[0:], timeLow)
|
|
||||||
binary.BigEndian.PutUint16(uuid[4:], timeMid)
|
|
||||||
binary.BigEndian.PutUint16(uuid[6:], timeHi)
|
|
||||||
binary.BigEndian.PutUint16(uuid[8:], seq)
|
|
||||||
|
|
||||||
nodeMu.Lock()
|
|
||||||
if nodeID == zeroID {
|
|
||||||
setNodeInterface("")
|
|
||||||
}
|
|
||||||
copy(uuid[10:], nodeID[:])
|
|
||||||
nodeMu.Unlock()
|
|
||||||
|
|
||||||
return uuid, nil
|
|
||||||
}
|
|
76
vendor/github.com/google/uuid/version4.go
generated
vendored
76
vendor/github.com/google/uuid/version4.go
generated
vendored
@ -1,76 +0,0 @@
|
|||||||
// Copyright 2016 Google Inc. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package uuid
|
|
||||||
|
|
||||||
import "io"
|
|
||||||
|
|
||||||
// New creates a new random UUID or panics. New is equivalent to
|
|
||||||
// the expression
|
|
||||||
//
|
|
||||||
// uuid.Must(uuid.NewRandom())
|
|
||||||
func New() UUID {
|
|
||||||
return Must(NewRandom())
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewString creates a new random UUID and returns it as a string or panics.
|
|
||||||
// NewString is equivalent to the expression
|
|
||||||
//
|
|
||||||
// uuid.New().String()
|
|
||||||
func NewString() string {
|
|
||||||
return Must(NewRandom()).String()
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewRandom returns a Random (Version 4) UUID.
|
|
||||||
//
|
|
||||||
// The strength of the UUIDs is based on the strength of the crypto/rand
|
|
||||||
// package.
|
|
||||||
//
|
|
||||||
// Uses the randomness pool if it was enabled with EnableRandPool.
|
|
||||||
//
|
|
||||||
// A note about uniqueness derived from the UUID Wikipedia entry:
|
|
||||||
//
|
|
||||||
// Randomly generated UUIDs have 122 random bits. One's annual risk of being
|
|
||||||
// hit by a meteorite is estimated to be one chance in 17 billion, that
|
|
||||||
// means the probability is about 0.00000000006 (6 × 10−11),
|
|
||||||
// equivalent to the odds of creating a few tens of trillions of UUIDs in a
|
|
||||||
// year and having one duplicate.
|
|
||||||
func NewRandom() (UUID, error) {
|
|
||||||
if !poolEnabled {
|
|
||||||
return NewRandomFromReader(rander)
|
|
||||||
}
|
|
||||||
return newRandomFromPool()
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewRandomFromReader returns a UUID based on bytes read from a given io.Reader.
|
|
||||||
func NewRandomFromReader(r io.Reader) (UUID, error) {
|
|
||||||
var uuid UUID
|
|
||||||
_, err := io.ReadFull(r, uuid[:])
|
|
||||||
if err != nil {
|
|
||||||
return Nil, err
|
|
||||||
}
|
|
||||||
uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4
|
|
||||||
uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
|
|
||||||
return uuid, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func newRandomFromPool() (UUID, error) {
|
|
||||||
var uuid UUID
|
|
||||||
poolMu.Lock()
|
|
||||||
if poolPos == randPoolSize {
|
|
||||||
_, err := io.ReadFull(rander, pool[:])
|
|
||||||
if err != nil {
|
|
||||||
poolMu.Unlock()
|
|
||||||
return Nil, err
|
|
||||||
}
|
|
||||||
poolPos = 0
|
|
||||||
}
|
|
||||||
copy(uuid[:], pool[poolPos:(poolPos+16)])
|
|
||||||
poolPos += 16
|
|
||||||
poolMu.Unlock()
|
|
||||||
|
|
||||||
uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4
|
|
||||||
uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
|
|
||||||
return uuid, nil
|
|
||||||
}
|
|
19
vendor/github.com/kballard/go-shellquote/LICENSE
generated
vendored
19
vendor/github.com/kballard/go-shellquote/LICENSE
generated
vendored
@ -1,19 +0,0 @@
|
|||||||
Copyright (C) 2014 Kevin Ballard
|
|
||||||
|
|
||||||
Permission is hereby granted, free of charge, to any person obtaining
|
|
||||||
a copy of this software and associated documentation files (the "Software"),
|
|
||||||
to deal in the Software without restriction, including without limitation
|
|
||||||
the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
|
||||||
and/or sell copies of the Software, and to permit persons to whom the
|
|
||||||
Software is furnished to do so, subject to the following conditions:
|
|
||||||
|
|
||||||
The above copyright notice and this permission notice shall be included
|
|
||||||
in all copies or substantial portions of the Software.
|
|
||||||
|
|
||||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
|
||||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
|
|
||||||
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
|
|
||||||
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
|
|
||||||
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
|
|
||||||
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
|
|
||||||
OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
|
36
vendor/github.com/kballard/go-shellquote/README
generated
vendored
36
vendor/github.com/kballard/go-shellquote/README
generated
vendored
@ -1,36 +0,0 @@
|
|||||||
PACKAGE
|
|
||||||
|
|
||||||
package shellquote
|
|
||||||
import "github.com/kballard/go-shellquote"
|
|
||||||
|
|
||||||
Shellquote provides utilities for joining/splitting strings using sh's
|
|
||||||
word-splitting rules.
|
|
||||||
|
|
||||||
VARIABLES
|
|
||||||
|
|
||||||
var (
|
|
||||||
UnterminatedSingleQuoteError = errors.New("Unterminated single-quoted string")
|
|
||||||
UnterminatedDoubleQuoteError = errors.New("Unterminated double-quoted string")
|
|
||||||
UnterminatedEscapeError = errors.New("Unterminated backslash-escape")
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
FUNCTIONS
|
|
||||||
|
|
||||||
func Join(args ...string) string
|
|
||||||
Join quotes each argument and joins them with a space. If passed to
|
|
||||||
/bin/sh, the resulting string will be split back into the original
|
|
||||||
arguments.
|
|
||||||
|
|
||||||
func Split(input string) (words []string, err error)
|
|
||||||
Split splits a string according to /bin/sh's word-splitting rules. It
|
|
||||||
supports backslash-escapes, single-quotes, and double-quotes. Notably it
|
|
||||||
does not support the $'' style of quoting. It also doesn't attempt to
|
|
||||||
perform any other sort of expansion, including brace expansion, shell
|
|
||||||
expansion, or pathname expansion.
|
|
||||||
|
|
||||||
If the given input has an unterminated quoted string or ends in a
|
|
||||||
backslash-escape, one of UnterminatedSingleQuoteError,
|
|
||||||
UnterminatedDoubleQuoteError, or UnterminatedEscapeError is returned.
|
|
||||||
|
|
||||||
|
|
3
vendor/github.com/kballard/go-shellquote/doc.go
generated
vendored
3
vendor/github.com/kballard/go-shellquote/doc.go
generated
vendored
@ -1,3 +0,0 @@
|
|||||||
// Shellquote provides utilities for joining/splitting strings using sh's
|
|
||||||
// word-splitting rules.
|
|
||||||
package shellquote
|
|
102
vendor/github.com/kballard/go-shellquote/quote.go
generated
vendored
102
vendor/github.com/kballard/go-shellquote/quote.go
generated
vendored
@ -1,102 +0,0 @@
|
|||||||
package shellquote
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"strings"
|
|
||||||
"unicode/utf8"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Join quotes each argument and joins them with a space.
|
|
||||||
// If passed to /bin/sh, the resulting string will be split back into the
|
|
||||||
// original arguments.
|
|
||||||
func Join(args ...string) string {
|
|
||||||
var buf bytes.Buffer
|
|
||||||
for i, arg := range args {
|
|
||||||
if i != 0 {
|
|
||||||
buf.WriteByte(' ')
|
|
||||||
}
|
|
||||||
quote(arg, &buf)
|
|
||||||
}
|
|
||||||
return buf.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
const (
|
|
||||||
specialChars = "\\'\"`${[|&;<>()*?!"
|
|
||||||
extraSpecialChars = " \t\n"
|
|
||||||
prefixChars = "~"
|
|
||||||
)
|
|
||||||
|
|
||||||
func quote(word string, buf *bytes.Buffer) {
|
|
||||||
// We want to try to produce a "nice" output. As such, we will
|
|
||||||
// backslash-escape most characters, but if we encounter a space, or if we
|
|
||||||
// encounter an extra-special char (which doesn't work with
|
|
||||||
// backslash-escaping) we switch over to quoting the whole word. We do this
|
|
||||||
// with a space because it's typically easier for people to read multi-word
|
|
||||||
// arguments when quoted with a space rather than with ugly backslashes
|
|
||||||
// everywhere.
|
|
||||||
origLen := buf.Len()
|
|
||||||
|
|
||||||
if len(word) == 0 {
|
|
||||||
// oops, no content
|
|
||||||
buf.WriteString("''")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
cur, prev := word, word
|
|
||||||
atStart := true
|
|
||||||
for len(cur) > 0 {
|
|
||||||
c, l := utf8.DecodeRuneInString(cur)
|
|
||||||
cur = cur[l:]
|
|
||||||
if strings.ContainsRune(specialChars, c) || (atStart && strings.ContainsRune(prefixChars, c)) {
|
|
||||||
// copy the non-special chars up to this point
|
|
||||||
if len(cur) < len(prev) {
|
|
||||||
buf.WriteString(prev[0 : len(prev)-len(cur)-l])
|
|
||||||
}
|
|
||||||
buf.WriteByte('\\')
|
|
||||||
buf.WriteRune(c)
|
|
||||||
prev = cur
|
|
||||||
} else if strings.ContainsRune(extraSpecialChars, c) {
|
|
||||||
// start over in quote mode
|
|
||||||
buf.Truncate(origLen)
|
|
||||||
goto quote
|
|
||||||
}
|
|
||||||
atStart = false
|
|
||||||
}
|
|
||||||
if len(prev) > 0 {
|
|
||||||
buf.WriteString(prev)
|
|
||||||
}
|
|
||||||
return
|
|
||||||
|
|
||||||
quote:
|
|
||||||
// quote mode
|
|
||||||
// Use single-quotes, but if we find a single-quote in the word, we need
|
|
||||||
// to terminate the string, emit an escaped quote, and start the string up
|
|
||||||
// again
|
|
||||||
inQuote := false
|
|
||||||
for len(word) > 0 {
|
|
||||||
i := strings.IndexRune(word, '\'')
|
|
||||||
if i == -1 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
if i > 0 {
|
|
||||||
if !inQuote {
|
|
||||||
buf.WriteByte('\'')
|
|
||||||
inQuote = true
|
|
||||||
}
|
|
||||||
buf.WriteString(word[0:i])
|
|
||||||
}
|
|
||||||
word = word[i+1:]
|
|
||||||
if inQuote {
|
|
||||||
buf.WriteByte('\'')
|
|
||||||
inQuote = false
|
|
||||||
}
|
|
||||||
buf.WriteString("\\'")
|
|
||||||
}
|
|
||||||
if len(word) > 0 {
|
|
||||||
if !inQuote {
|
|
||||||
buf.WriteByte('\'')
|
|
||||||
}
|
|
||||||
buf.WriteString(word)
|
|
||||||
buf.WriteByte('\'')
|
|
||||||
}
|
|
||||||
}
|
|
156
vendor/github.com/kballard/go-shellquote/unquote.go
generated
vendored
156
vendor/github.com/kballard/go-shellquote/unquote.go
generated
vendored
@ -1,156 +0,0 @@
|
|||||||
package shellquote
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"errors"
|
|
||||||
"strings"
|
|
||||||
"unicode/utf8"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
UnterminatedSingleQuoteError = errors.New("Unterminated single-quoted string")
|
|
||||||
UnterminatedDoubleQuoteError = errors.New("Unterminated double-quoted string")
|
|
||||||
UnterminatedEscapeError = errors.New("Unterminated backslash-escape")
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
splitChars = " \n\t"
|
|
||||||
singleChar = '\''
|
|
||||||
doubleChar = '"'
|
|
||||||
escapeChar = '\\'
|
|
||||||
doubleEscapeChars = "$`\"\n\\"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Split splits a string according to /bin/sh's word-splitting rules. It
|
|
||||||
// supports backslash-escapes, single-quotes, and double-quotes. Notably it does
|
|
||||||
// not support the $'' style of quoting. It also doesn't attempt to perform any
|
|
||||||
// other sort of expansion, including brace expansion, shell expansion, or
|
|
||||||
// pathname expansion.
|
|
||||||
//
|
|
||||||
// If the given input has an unterminated quoted string or ends in a
|
|
||||||
// backslash-escape, one of UnterminatedSingleQuoteError,
|
|
||||||
// UnterminatedDoubleQuoteError, or UnterminatedEscapeError is returned.
|
|
||||||
func Split(input string) (words []string, err error) {
|
|
||||||
var buf bytes.Buffer
|
|
||||||
words = make([]string, 0)
|
|
||||||
|
|
||||||
for len(input) > 0 {
|
|
||||||
// skip any splitChars at the start
|
|
||||||
c, l := utf8.DecodeRuneInString(input)
|
|
||||||
if strings.ContainsRune(splitChars, c) {
|
|
||||||
input = input[l:]
|
|
||||||
continue
|
|
||||||
} else if c == escapeChar {
|
|
||||||
// Look ahead for escaped newline so we can skip over it
|
|
||||||
next := input[l:]
|
|
||||||
if len(next) == 0 {
|
|
||||||
err = UnterminatedEscapeError
|
|
||||||
return
|
|
||||||
}
|
|
||||||
c2, l2 := utf8.DecodeRuneInString(next)
|
|
||||||
if c2 == '\n' {
|
|
||||||
input = next[l2:]
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
var word string
|
|
||||||
word, input, err = splitWord(input, &buf)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
words = append(words, word)
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func splitWord(input string, buf *bytes.Buffer) (word string, remainder string, err error) {
|
|
||||||
buf.Reset()
|
|
||||||
|
|
||||||
raw:
|
|
||||||
{
|
|
||||||
cur := input
|
|
||||||
for len(cur) > 0 {
|
|
||||||
c, l := utf8.DecodeRuneInString(cur)
|
|
||||||
cur = cur[l:]
|
|
||||||
if c == singleChar {
|
|
||||||
buf.WriteString(input[0 : len(input)-len(cur)-l])
|
|
||||||
input = cur
|
|
||||||
goto single
|
|
||||||
} else if c == doubleChar {
|
|
||||||
buf.WriteString(input[0 : len(input)-len(cur)-l])
|
|
||||||
input = cur
|
|
||||||
goto double
|
|
||||||
} else if c == escapeChar {
|
|
||||||
buf.WriteString(input[0 : len(input)-len(cur)-l])
|
|
||||||
input = cur
|
|
||||||
goto escape
|
|
||||||
} else if strings.ContainsRune(splitChars, c) {
|
|
||||||
buf.WriteString(input[0 : len(input)-len(cur)-l])
|
|
||||||
return buf.String(), cur, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if len(input) > 0 {
|
|
||||||
buf.WriteString(input)
|
|
||||||
input = ""
|
|
||||||
}
|
|
||||||
goto done
|
|
||||||
}
|
|
||||||
|
|
||||||
escape:
|
|
||||||
{
|
|
||||||
if len(input) == 0 {
|
|
||||||
return "", "", UnterminatedEscapeError
|
|
||||||
}
|
|
||||||
c, l := utf8.DecodeRuneInString(input)
|
|
||||||
if c == '\n' {
|
|
||||||
// a backslash-escaped newline is elided from the output entirely
|
|
||||||
} else {
|
|
||||||
buf.WriteString(input[:l])
|
|
||||||
}
|
|
||||||
input = input[l:]
|
|
||||||
}
|
|
||||||
goto raw
|
|
||||||
|
|
||||||
single:
|
|
||||||
{
|
|
||||||
i := strings.IndexRune(input, singleChar)
|
|
||||||
if i == -1 {
|
|
||||||
return "", "", UnterminatedSingleQuoteError
|
|
||||||
}
|
|
||||||
buf.WriteString(input[0:i])
|
|
||||||
input = input[i+1:]
|
|
||||||
goto raw
|
|
||||||
}
|
|
||||||
|
|
||||||
double:
|
|
||||||
{
|
|
||||||
cur := input
|
|
||||||
for len(cur) > 0 {
|
|
||||||
c, l := utf8.DecodeRuneInString(cur)
|
|
||||||
cur = cur[l:]
|
|
||||||
if c == doubleChar {
|
|
||||||
buf.WriteString(input[0 : len(input)-len(cur)-l])
|
|
||||||
input = cur
|
|
||||||
goto raw
|
|
||||||
} else if c == escapeChar {
|
|
||||||
// bash only supports certain escapes in double-quoted strings
|
|
||||||
c2, l2 := utf8.DecodeRuneInString(cur)
|
|
||||||
cur = cur[l2:]
|
|
||||||
if strings.ContainsRune(doubleEscapeChars, c2) {
|
|
||||||
buf.WriteString(input[0 : len(input)-len(cur)-l-l2])
|
|
||||||
if c2 == '\n' {
|
|
||||||
// newline is special, skip the backslash entirely
|
|
||||||
} else {
|
|
||||||
buf.WriteRune(c2)
|
|
||||||
}
|
|
||||||
input = cur
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return "", "", UnterminatedDoubleQuoteError
|
|
||||||
}
|
|
||||||
|
|
||||||
done:
|
|
||||||
return buf.String(), input, nil
|
|
||||||
}
|
|
28
vendor/github.com/klauspost/compress/LICENSE
generated
vendored
28
vendor/github.com/klauspost/compress/LICENSE
generated
vendored
@ -1,28 +0,0 @@
|
|||||||
Copyright (c) 2012 The Go Authors. All rights reserved.
|
|
||||||
Copyright (c) 2019 Klaus Post. All rights reserved.
|
|
||||||
|
|
||||||
Redistribution and use in source and binary forms, with or without
|
|
||||||
modification, are permitted provided that the following conditions are
|
|
||||||
met:
|
|
||||||
|
|
||||||
* Redistributions of source code must retain the above copyright
|
|
||||||
notice, this list of conditions and the following disclaimer.
|
|
||||||
* Redistributions in binary form must reproduce the above
|
|
||||||
copyright notice, this list of conditions and the following disclaimer
|
|
||||||
in the documentation and/or other materials provided with the
|
|
||||||
distribution.
|
|
||||||
* Neither the name of Google Inc. nor the names of its
|
|
||||||
contributors may be used to endorse or promote products derived from
|
|
||||||
this software without specific prior written permission.
|
|
||||||
|
|
||||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
||||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
||||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|
||||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
||||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
||||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|
||||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
||||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
||||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
||||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
||||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
819
vendor/github.com/klauspost/compress/flate/deflate.go
generated
vendored
819
vendor/github.com/klauspost/compress/flate/deflate.go
generated
vendored
@ -1,819 +0,0 @@
|
|||||||
// Copyright 2009 The Go Authors. All rights reserved.
|
|
||||||
// Copyright (c) 2015 Klaus Post
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package flate
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"math"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
NoCompression = 0
|
|
||||||
BestSpeed = 1
|
|
||||||
BestCompression = 9
|
|
||||||
DefaultCompression = -1
|
|
||||||
|
|
||||||
// HuffmanOnly disables Lempel-Ziv match searching and only performs Huffman
|
|
||||||
// entropy encoding. This mode is useful in compressing data that has
|
|
||||||
// already been compressed with an LZ style algorithm (e.g. Snappy or LZ4)
|
|
||||||
// that lacks an entropy encoder. Compression gains are achieved when
|
|
||||||
// certain bytes in the input stream occur more frequently than others.
|
|
||||||
//
|
|
||||||
// Note that HuffmanOnly produces a compressed output that is
|
|
||||||
// RFC 1951 compliant. That is, any valid DEFLATE decompressor will
|
|
||||||
// continue to be able to decompress this output.
|
|
||||||
HuffmanOnly = -2
|
|
||||||
ConstantCompression = HuffmanOnly // compatibility alias.
|
|
||||||
|
|
||||||
logWindowSize = 15
|
|
||||||
windowSize = 1 << logWindowSize
|
|
||||||
windowMask = windowSize - 1
|
|
||||||
logMaxOffsetSize = 15 // Standard DEFLATE
|
|
||||||
minMatchLength = 4 // The smallest match that the compressor looks for
|
|
||||||
maxMatchLength = 258 // The longest match for the compressor
|
|
||||||
minOffsetSize = 1 // The shortest offset that makes any sense
|
|
||||||
|
|
||||||
// The maximum number of tokens we put into a single flat block, just too
|
|
||||||
// stop things from getting too large.
|
|
||||||
maxFlateBlockTokens = 1 << 14
|
|
||||||
maxStoreBlockSize = 65535
|
|
||||||
hashBits = 17 // After 17 performance degrades
|
|
||||||
hashSize = 1 << hashBits
|
|
||||||
hashMask = (1 << hashBits) - 1
|
|
||||||
hashShift = (hashBits + minMatchLength - 1) / minMatchLength
|
|
||||||
maxHashOffset = 1 << 24
|
|
||||||
|
|
||||||
skipNever = math.MaxInt32
|
|
||||||
|
|
||||||
debugDeflate = false
|
|
||||||
)
|
|
||||||
|
|
||||||
type compressionLevel struct {
|
|
||||||
good, lazy, nice, chain, fastSkipHashing, level int
|
|
||||||
}
|
|
||||||
|
|
||||||
// Compression levels have been rebalanced from zlib deflate defaults
|
|
||||||
// to give a bigger spread in speed and compression.
|
|
||||||
// See https://blog.klauspost.com/rebalancing-deflate-compression-levels/
|
|
||||||
var levels = []compressionLevel{
|
|
||||||
{}, // 0
|
|
||||||
// Level 1-6 uses specialized algorithm - values not used
|
|
||||||
{0, 0, 0, 0, 0, 1},
|
|
||||||
{0, 0, 0, 0, 0, 2},
|
|
||||||
{0, 0, 0, 0, 0, 3},
|
|
||||||
{0, 0, 0, 0, 0, 4},
|
|
||||||
{0, 0, 0, 0, 0, 5},
|
|
||||||
{0, 0, 0, 0, 0, 6},
|
|
||||||
// Levels 7-9 use increasingly more lazy matching
|
|
||||||
// and increasingly stringent conditions for "good enough".
|
|
||||||
{8, 8, 24, 16, skipNever, 7},
|
|
||||||
{10, 16, 24, 64, skipNever, 8},
|
|
||||||
{32, 258, 258, 4096, skipNever, 9},
|
|
||||||
}
|
|
||||||
|
|
||||||
// advancedState contains state for the advanced levels, with bigger hash tables, etc.
|
|
||||||
type advancedState struct {
|
|
||||||
// deflate state
|
|
||||||
length int
|
|
||||||
offset int
|
|
||||||
hash uint32
|
|
||||||
maxInsertIndex int
|
|
||||||
ii uint16 // position of last match, intended to overflow to reset.
|
|
||||||
|
|
||||||
// Input hash chains
|
|
||||||
// hashHead[hashValue] contains the largest inputIndex with the specified hash value
|
|
||||||
// If hashHead[hashValue] is within the current window, then
|
|
||||||
// hashPrev[hashHead[hashValue] & windowMask] contains the previous index
|
|
||||||
// with the same hash value.
|
|
||||||
chainHead int
|
|
||||||
hashHead [hashSize]uint32
|
|
||||||
hashPrev [windowSize]uint32
|
|
||||||
hashOffset int
|
|
||||||
|
|
||||||
// input window: unprocessed data is window[index:windowEnd]
|
|
||||||
index int
|
|
||||||
hashMatch [maxMatchLength + minMatchLength]uint32
|
|
||||||
}
|
|
||||||
|
|
||||||
type compressor struct {
|
|
||||||
compressionLevel
|
|
||||||
|
|
||||||
w *huffmanBitWriter
|
|
||||||
|
|
||||||
// compression algorithm
|
|
||||||
fill func(*compressor, []byte) int // copy data to window
|
|
||||||
step func(*compressor) // process window
|
|
||||||
sync bool // requesting flush
|
|
||||||
|
|
||||||
window []byte
|
|
||||||
windowEnd int
|
|
||||||
blockStart int // window index where current tokens start
|
|
||||||
byteAvailable bool // if true, still need to process window[index-1].
|
|
||||||
err error
|
|
||||||
|
|
||||||
// queued output tokens
|
|
||||||
tokens tokens
|
|
||||||
fast fastEnc
|
|
||||||
state *advancedState
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *compressor) fillDeflate(b []byte) int {
|
|
||||||
s := d.state
|
|
||||||
if s.index >= 2*windowSize-(minMatchLength+maxMatchLength) {
|
|
||||||
// shift the window by windowSize
|
|
||||||
copy(d.window[:], d.window[windowSize:2*windowSize])
|
|
||||||
s.index -= windowSize
|
|
||||||
d.windowEnd -= windowSize
|
|
||||||
if d.blockStart >= windowSize {
|
|
||||||
d.blockStart -= windowSize
|
|
||||||
} else {
|
|
||||||
d.blockStart = math.MaxInt32
|
|
||||||
}
|
|
||||||
s.hashOffset += windowSize
|
|
||||||
if s.hashOffset > maxHashOffset {
|
|
||||||
delta := s.hashOffset - 1
|
|
||||||
s.hashOffset -= delta
|
|
||||||
s.chainHead -= delta
|
|
||||||
// Iterate over slices instead of arrays to avoid copying
|
|
||||||
// the entire table onto the stack (Issue #18625).
|
|
||||||
for i, v := range s.hashPrev[:] {
|
|
||||||
if int(v) > delta {
|
|
||||||
s.hashPrev[i] = uint32(int(v) - delta)
|
|
||||||
} else {
|
|
||||||
s.hashPrev[i] = 0
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for i, v := range s.hashHead[:] {
|
|
||||||
if int(v) > delta {
|
|
||||||
s.hashHead[i] = uint32(int(v) - delta)
|
|
||||||
} else {
|
|
||||||
s.hashHead[i] = 0
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
n := copy(d.window[d.windowEnd:], b)
|
|
||||||
d.windowEnd += n
|
|
||||||
return n
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *compressor) writeBlock(tok *tokens, index int, eof bool) error {
|
|
||||||
if index > 0 || eof {
|
|
||||||
var window []byte
|
|
||||||
if d.blockStart <= index {
|
|
||||||
window = d.window[d.blockStart:index]
|
|
||||||
}
|
|
||||||
d.blockStart = index
|
|
||||||
d.w.writeBlock(tok, eof, window)
|
|
||||||
return d.w.err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// writeBlockSkip writes the current block and uses the number of tokens
|
|
||||||
// to determine if the block should be stored on no matches, or
|
|
||||||
// only huffman encoded.
|
|
||||||
func (d *compressor) writeBlockSkip(tok *tokens, index int, eof bool) error {
|
|
||||||
if index > 0 || eof {
|
|
||||||
if d.blockStart <= index {
|
|
||||||
window := d.window[d.blockStart:index]
|
|
||||||
// If we removed less than a 64th of all literals
|
|
||||||
// we huffman compress the block.
|
|
||||||
if int(tok.n) > len(window)-int(tok.n>>6) {
|
|
||||||
d.w.writeBlockHuff(eof, window, d.sync)
|
|
||||||
} else {
|
|
||||||
// Write a dynamic huffman block.
|
|
||||||
d.w.writeBlockDynamic(tok, eof, window, d.sync)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
d.w.writeBlock(tok, eof, nil)
|
|
||||||
}
|
|
||||||
d.blockStart = index
|
|
||||||
return d.w.err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// fillWindow will fill the current window with the supplied
|
|
||||||
// dictionary and calculate all hashes.
|
|
||||||
// This is much faster than doing a full encode.
|
|
||||||
// Should only be used after a start/reset.
|
|
||||||
func (d *compressor) fillWindow(b []byte) {
|
|
||||||
// Do not fill window if we are in store-only or huffman mode.
|
|
||||||
if d.level <= 0 {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if d.fast != nil {
|
|
||||||
// encode the last data, but discard the result
|
|
||||||
if len(b) > maxMatchOffset {
|
|
||||||
b = b[len(b)-maxMatchOffset:]
|
|
||||||
}
|
|
||||||
d.fast.Encode(&d.tokens, b)
|
|
||||||
d.tokens.Reset()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
s := d.state
|
|
||||||
// If we are given too much, cut it.
|
|
||||||
if len(b) > windowSize {
|
|
||||||
b = b[len(b)-windowSize:]
|
|
||||||
}
|
|
||||||
// Add all to window.
|
|
||||||
n := copy(d.window[d.windowEnd:], b)
|
|
||||||
|
|
||||||
// Calculate 256 hashes at the time (more L1 cache hits)
|
|
||||||
loops := (n + 256 - minMatchLength) / 256
|
|
||||||
for j := 0; j < loops; j++ {
|
|
||||||
startindex := j * 256
|
|
||||||
end := startindex + 256 + minMatchLength - 1
|
|
||||||
if end > n {
|
|
||||||
end = n
|
|
||||||
}
|
|
||||||
tocheck := d.window[startindex:end]
|
|
||||||
dstSize := len(tocheck) - minMatchLength + 1
|
|
||||||
|
|
||||||
if dstSize <= 0 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
dst := s.hashMatch[:dstSize]
|
|
||||||
bulkHash4(tocheck, dst)
|
|
||||||
var newH uint32
|
|
||||||
for i, val := range dst {
|
|
||||||
di := i + startindex
|
|
||||||
newH = val & hashMask
|
|
||||||
// Get previous value with the same hash.
|
|
||||||
// Our chain should point to the previous value.
|
|
||||||
s.hashPrev[di&windowMask] = s.hashHead[newH]
|
|
||||||
// Set the head of the hash chain to us.
|
|
||||||
s.hashHead[newH] = uint32(di + s.hashOffset)
|
|
||||||
}
|
|
||||||
s.hash = newH
|
|
||||||
}
|
|
||||||
// Update window information.
|
|
||||||
d.windowEnd += n
|
|
||||||
s.index = n
|
|
||||||
}
|
|
||||||
|
|
||||||
// Try to find a match starting at index whose length is greater than prevSize.
|
|
||||||
// We only look at chainCount possibilities before giving up.
|
|
||||||
// pos = s.index, prevHead = s.chainHead-s.hashOffset, prevLength=minMatchLength-1, lookahead
|
|
||||||
func (d *compressor) findMatch(pos int, prevHead int, prevLength int, lookahead int) (length, offset int, ok bool) {
|
|
||||||
minMatchLook := maxMatchLength
|
|
||||||
if lookahead < minMatchLook {
|
|
||||||
minMatchLook = lookahead
|
|
||||||
}
|
|
||||||
|
|
||||||
win := d.window[0 : pos+minMatchLook]
|
|
||||||
|
|
||||||
// We quit when we get a match that's at least nice long
|
|
||||||
nice := len(win) - pos
|
|
||||||
if d.nice < nice {
|
|
||||||
nice = d.nice
|
|
||||||
}
|
|
||||||
|
|
||||||
// If we've got a match that's good enough, only look in 1/4 the chain.
|
|
||||||
tries := d.chain
|
|
||||||
length = prevLength
|
|
||||||
if length >= d.good {
|
|
||||||
tries >>= 2
|
|
||||||
}
|
|
||||||
|
|
||||||
wEnd := win[pos+length]
|
|
||||||
wPos := win[pos:]
|
|
||||||
minIndex := pos - windowSize
|
|
||||||
|
|
||||||
for i := prevHead; tries > 0; tries-- {
|
|
||||||
if wEnd == win[i+length] {
|
|
||||||
n := matchLen(win[i:i+minMatchLook], wPos)
|
|
||||||
|
|
||||||
if n > length && (n > minMatchLength || pos-i <= 4096) {
|
|
||||||
length = n
|
|
||||||
offset = pos - i
|
|
||||||
ok = true
|
|
||||||
if n >= nice {
|
|
||||||
// The match is good enough that we don't try to find a better one.
|
|
||||||
break
|
|
||||||
}
|
|
||||||
wEnd = win[pos+n]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if i == minIndex {
|
|
||||||
// hashPrev[i & windowMask] has already been overwritten, so stop now.
|
|
||||||
break
|
|
||||||
}
|
|
||||||
i = int(d.state.hashPrev[i&windowMask]) - d.state.hashOffset
|
|
||||||
if i < minIndex || i < 0 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *compressor) writeStoredBlock(buf []byte) error {
|
|
||||||
if d.w.writeStoredHeader(len(buf), false); d.w.err != nil {
|
|
||||||
return d.w.err
|
|
||||||
}
|
|
||||||
d.w.writeBytes(buf)
|
|
||||||
return d.w.err
|
|
||||||
}
|
|
||||||
|
|
||||||
// hash4 returns a hash representation of the first 4 bytes
|
|
||||||
// of the supplied slice.
|
|
||||||
// The caller must ensure that len(b) >= 4.
|
|
||||||
func hash4(b []byte) uint32 {
|
|
||||||
b = b[:4]
|
|
||||||
return hash4u(uint32(b[3])|uint32(b[2])<<8|uint32(b[1])<<16|uint32(b[0])<<24, hashBits)
|
|
||||||
}
|
|
||||||
|
|
||||||
// bulkHash4 will compute hashes using the same
|
|
||||||
// algorithm as hash4
|
|
||||||
func bulkHash4(b []byte, dst []uint32) {
|
|
||||||
if len(b) < 4 {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
hb := uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24
|
|
||||||
dst[0] = hash4u(hb, hashBits)
|
|
||||||
end := len(b) - 4 + 1
|
|
||||||
for i := 1; i < end; i++ {
|
|
||||||
hb = (hb << 8) | uint32(b[i+3])
|
|
||||||
dst[i] = hash4u(hb, hashBits)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *compressor) initDeflate() {
|
|
||||||
d.window = make([]byte, 2*windowSize)
|
|
||||||
d.byteAvailable = false
|
|
||||||
d.err = nil
|
|
||||||
if d.state == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
s := d.state
|
|
||||||
s.index = 0
|
|
||||||
s.hashOffset = 1
|
|
||||||
s.length = minMatchLength - 1
|
|
||||||
s.offset = 0
|
|
||||||
s.hash = 0
|
|
||||||
s.chainHead = -1
|
|
||||||
}
|
|
||||||
|
|
||||||
// deflateLazy is the same as deflate, but with d.fastSkipHashing == skipNever,
|
|
||||||
// meaning it always has lazy matching on.
|
|
||||||
func (d *compressor) deflateLazy() {
|
|
||||||
s := d.state
|
|
||||||
// Sanity enables additional runtime tests.
|
|
||||||
// It's intended to be used during development
|
|
||||||
// to supplement the currently ad-hoc unit tests.
|
|
||||||
const sanity = debugDeflate
|
|
||||||
|
|
||||||
if d.windowEnd-s.index < minMatchLength+maxMatchLength && !d.sync {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
s.maxInsertIndex = d.windowEnd - (minMatchLength - 1)
|
|
||||||
if s.index < s.maxInsertIndex {
|
|
||||||
s.hash = hash4(d.window[s.index : s.index+minMatchLength])
|
|
||||||
}
|
|
||||||
|
|
||||||
for {
|
|
||||||
if sanity && s.index > d.windowEnd {
|
|
||||||
panic("index > windowEnd")
|
|
||||||
}
|
|
||||||
lookahead := d.windowEnd - s.index
|
|
||||||
if lookahead < minMatchLength+maxMatchLength {
|
|
||||||
if !d.sync {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if sanity && s.index > d.windowEnd {
|
|
||||||
panic("index > windowEnd")
|
|
||||||
}
|
|
||||||
if lookahead == 0 {
|
|
||||||
// Flush current output block if any.
|
|
||||||
if d.byteAvailable {
|
|
||||||
// There is still one pending token that needs to be flushed
|
|
||||||
d.tokens.AddLiteral(d.window[s.index-1])
|
|
||||||
d.byteAvailable = false
|
|
||||||
}
|
|
||||||
if d.tokens.n > 0 {
|
|
||||||
if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
d.tokens.Reset()
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if s.index < s.maxInsertIndex {
|
|
||||||
// Update the hash
|
|
||||||
s.hash = hash4(d.window[s.index : s.index+minMatchLength])
|
|
||||||
ch := s.hashHead[s.hash&hashMask]
|
|
||||||
s.chainHead = int(ch)
|
|
||||||
s.hashPrev[s.index&windowMask] = ch
|
|
||||||
s.hashHead[s.hash&hashMask] = uint32(s.index + s.hashOffset)
|
|
||||||
}
|
|
||||||
prevLength := s.length
|
|
||||||
prevOffset := s.offset
|
|
||||||
s.length = minMatchLength - 1
|
|
||||||
s.offset = 0
|
|
||||||
minIndex := s.index - windowSize
|
|
||||||
if minIndex < 0 {
|
|
||||||
minIndex = 0
|
|
||||||
}
|
|
||||||
|
|
||||||
if s.chainHead-s.hashOffset >= minIndex && lookahead > prevLength && prevLength < d.lazy {
|
|
||||||
if newLength, newOffset, ok := d.findMatch(s.index, s.chainHead-s.hashOffset, minMatchLength-1, lookahead); ok {
|
|
||||||
s.length = newLength
|
|
||||||
s.offset = newOffset
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if prevLength >= minMatchLength && s.length <= prevLength {
|
|
||||||
// There was a match at the previous step, and the current match is
|
|
||||||
// not better. Output the previous match.
|
|
||||||
d.tokens.AddMatch(uint32(prevLength-3), uint32(prevOffset-minOffsetSize))
|
|
||||||
|
|
||||||
// Insert in the hash table all strings up to the end of the match.
|
|
||||||
// index and index-1 are already inserted. If there is not enough
|
|
||||||
// lookahead, the last two strings are not inserted into the hash
|
|
||||||
// table.
|
|
||||||
var newIndex int
|
|
||||||
newIndex = s.index + prevLength - 1
|
|
||||||
// Calculate missing hashes
|
|
||||||
end := newIndex
|
|
||||||
if end > s.maxInsertIndex {
|
|
||||||
end = s.maxInsertIndex
|
|
||||||
}
|
|
||||||
end += minMatchLength - 1
|
|
||||||
startindex := s.index + 1
|
|
||||||
if startindex > s.maxInsertIndex {
|
|
||||||
startindex = s.maxInsertIndex
|
|
||||||
}
|
|
||||||
tocheck := d.window[startindex:end]
|
|
||||||
dstSize := len(tocheck) - minMatchLength + 1
|
|
||||||
if dstSize > 0 {
|
|
||||||
dst := s.hashMatch[:dstSize]
|
|
||||||
bulkHash4(tocheck, dst)
|
|
||||||
var newH uint32
|
|
||||||
for i, val := range dst {
|
|
||||||
di := i + startindex
|
|
||||||
newH = val & hashMask
|
|
||||||
// Get previous value with the same hash.
|
|
||||||
// Our chain should point to the previous value.
|
|
||||||
s.hashPrev[di&windowMask] = s.hashHead[newH]
|
|
||||||
// Set the head of the hash chain to us.
|
|
||||||
s.hashHead[newH] = uint32(di + s.hashOffset)
|
|
||||||
}
|
|
||||||
s.hash = newH
|
|
||||||
}
|
|
||||||
|
|
||||||
s.index = newIndex
|
|
||||||
d.byteAvailable = false
|
|
||||||
s.length = minMatchLength - 1
|
|
||||||
if d.tokens.n == maxFlateBlockTokens {
|
|
||||||
// The block includes the current character
|
|
||||||
if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
d.tokens.Reset()
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// Reset, if we got a match this run.
|
|
||||||
if s.length >= minMatchLength {
|
|
||||||
s.ii = 0
|
|
||||||
}
|
|
||||||
// We have a byte waiting. Emit it.
|
|
||||||
if d.byteAvailable {
|
|
||||||
s.ii++
|
|
||||||
d.tokens.AddLiteral(d.window[s.index-1])
|
|
||||||
if d.tokens.n == maxFlateBlockTokens {
|
|
||||||
if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
d.tokens.Reset()
|
|
||||||
}
|
|
||||||
s.index++
|
|
||||||
|
|
||||||
// If we have a long run of no matches, skip additional bytes
|
|
||||||
// Resets when s.ii overflows after 64KB.
|
|
||||||
if s.ii > 31 {
|
|
||||||
n := int(s.ii >> 5)
|
|
||||||
for j := 0; j < n; j++ {
|
|
||||||
if s.index >= d.windowEnd-1 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
d.tokens.AddLiteral(d.window[s.index-1])
|
|
||||||
if d.tokens.n == maxFlateBlockTokens {
|
|
||||||
if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
d.tokens.Reset()
|
|
||||||
}
|
|
||||||
s.index++
|
|
||||||
}
|
|
||||||
// Flush last byte
|
|
||||||
d.tokens.AddLiteral(d.window[s.index-1])
|
|
||||||
d.byteAvailable = false
|
|
||||||
// s.length = minMatchLength - 1 // not needed, since s.ii is reset above, so it should never be > minMatchLength
|
|
||||||
if d.tokens.n == maxFlateBlockTokens {
|
|
||||||
if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
d.tokens.Reset()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
s.index++
|
|
||||||
d.byteAvailable = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *compressor) store() {
|
|
||||||
if d.windowEnd > 0 && (d.windowEnd == maxStoreBlockSize || d.sync) {
|
|
||||||
d.err = d.writeStoredBlock(d.window[:d.windowEnd])
|
|
||||||
d.windowEnd = 0
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// fillBlock copies as much of b as fits into the window buffer for
// huffman-only / stored compression and advances the write position.
// The number of bytes copied is returned; any remainder of b must be
// offered again after the window has been flushed.
// (The original comment referred to "fillWindow", which is a different method.)
func (d *compressor) fillBlock(b []byte) int {
	n := copy(d.window[d.windowEnd:], b)
	d.windowEnd += n
	return n
}
|
|
||||||
|
|
||||||
// storeHuff will compress and store the currently added data,
|
|
||||||
// if enough has been accumulated or we at the end of the stream.
|
|
||||||
// Any error that occurred will be in d.err
|
|
||||||
func (d *compressor) storeHuff() {
|
|
||||||
if d.windowEnd < len(d.window) && !d.sync || d.windowEnd == 0 {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
d.w.writeBlockHuff(false, d.window[:d.windowEnd], d.sync)
|
|
||||||
d.err = d.w.err
|
|
||||||
d.windowEnd = 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// storeFast will compress and store the currently added data,
// if enough has been accumulated or we at the end of the stream.
// Any error that occurred will be in d.err
func (d *compressor) storeFast() {
	// We only compress if we have maxStoreBlockSize.
	if d.windowEnd < len(d.window) {
		if !d.sync {
			return
		}
		// Handle extremely small sizes.
		if d.windowEnd < 128 {
			if d.windowEnd == 0 {
				return
			}
			if d.windowEnd <= 32 {
				// Tiny payload: a raw stored block beats any entropy coding.
				d.err = d.writeStoredBlock(d.window[:d.windowEnd])
			} else {
				// Small payload: Huffman-only, skip match finding.
				d.w.writeBlockHuff(false, d.window[:d.windowEnd], true)
				d.err = d.w.err
			}
			d.tokens.Reset()
			d.windowEnd = 0
			d.fast.Reset()
			return
		}
	}

	d.fast.Encode(&d.tokens, d.window[:d.windowEnd])
	// If we made zero matches, store the block as is.
	if d.tokens.n == 0 {
		d.err = d.writeStoredBlock(d.window[:d.windowEnd])
		// If we removed less than 1/16th, huffman compress the block.
	} else if int(d.tokens.n) > d.windowEnd-(d.windowEnd>>4) {
		d.w.writeBlockHuff(false, d.window[:d.windowEnd], d.sync)
		d.err = d.w.err
	} else {
		d.w.writeBlockDynamic(&d.tokens, false, d.window[:d.windowEnd], d.sync)
		d.err = d.w.err
	}
	d.tokens.Reset()
	d.windowEnd = 0
}
|
|
||||||
|
|
||||||
// write will add input byte to the stream.
|
|
||||||
// Unless an error occurs all bytes will be consumed.
|
|
||||||
func (d *compressor) write(b []byte) (n int, err error) {
|
|
||||||
if d.err != nil {
|
|
||||||
return 0, d.err
|
|
||||||
}
|
|
||||||
n = len(b)
|
|
||||||
for len(b) > 0 {
|
|
||||||
d.step(d)
|
|
||||||
b = b[d.fill(d, b):]
|
|
||||||
if d.err != nil {
|
|
||||||
return 0, d.err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return n, d.err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *compressor) syncFlush() error {
|
|
||||||
d.sync = true
|
|
||||||
if d.err != nil {
|
|
||||||
return d.err
|
|
||||||
}
|
|
||||||
d.step(d)
|
|
||||||
if d.err == nil {
|
|
||||||
d.w.writeStoredHeader(0, false)
|
|
||||||
d.w.flush()
|
|
||||||
d.err = d.w.err
|
|
||||||
}
|
|
||||||
d.sync = false
|
|
||||||
return d.err
|
|
||||||
}
|
|
||||||
|
|
||||||
// init configures the compressor to write to w at the given level,
// choosing the fill/step strategy pair:
//
//	-2 (ConstantCompression): Huffman-only
//	-1 (DefaultCompression):  treated as level 5
//	 0 (NoCompression):       stored blocks only
//	 1-6:                     fast table-based encoders
//	 7-9:                     lazy matching
//
// It returns an error only when level is outside [-2, 9].
func (d *compressor) init(w io.Writer, level int) (err error) {
	d.w = newHuffmanBitWriter(w)

	switch {
	case level == NoCompression:
		d.window = make([]byte, maxStoreBlockSize)
		d.fill = (*compressor).fillBlock
		d.step = (*compressor).store
	case level == ConstantCompression:
		d.w.logNewTablePenalty = 4
		d.window = make([]byte, maxStoreBlockSize)
		d.fill = (*compressor).fillBlock
		d.step = (*compressor).storeHuff
	case level == DefaultCompression:
		// Default maps to level 5; falls through to the fast-encoder case.
		level = 5
		fallthrough
	case level >= 1 && level <= 6:
		d.w.logNewTablePenalty = 6
		d.fast = newFastEnc(level)
		d.window = make([]byte, maxStoreBlockSize)
		d.fill = (*compressor).fillBlock
		d.step = (*compressor).storeFast
	case 7 <= level && level <= 9:
		d.w.logNewTablePenalty = 10
		d.state = &advancedState{}
		d.compressionLevel = levels[level]
		d.initDeflate()
		d.fill = (*compressor).fillDeflate
		d.step = (*compressor).deflateLazy
	default:
		return fmt.Errorf("flate: invalid compression level %d: want value in range [-2, 9]", level)
	}
	d.level = level
	return nil
}
|
|
||||||
|
|
||||||
// reset the state of the compressor so it can be reused with a new
// output writer, preserving allocated buffers where possible.
func (d *compressor) reset(w io.Writer) {
	d.w.reset(w)
	d.sync = false
	d.err = nil
	// We only need to reset a few things for Snappy.
	if d.fast != nil {
		d.fast.Reset()
		d.windowEnd = 0
		d.tokens.Reset()
		return
	}
	switch d.compressionLevel.chain {
	case 0:
		// level was NoCompression or ConstantCompression.
		d.windowEnd = 0
	default:
		// Lazy-matching levels: clear the full hash-chain state.
		s := d.state
		s.chainHead = -1
		for i := range s.hashHead {
			s.hashHead[i] = 0
		}
		for i := range s.hashPrev {
			s.hashPrev[i] = 0
		}
		// hashOffset starts at 1 so that a zero table entry means "unset".
		s.hashOffset = 1
		s.index, d.windowEnd = 0, 0
		d.blockStart, d.byteAvailable = 0, false
		d.tokens.Reset()
		s.length = minMatchLength - 1
		s.offset = 0
		s.hash = 0
		s.ii = 0
		s.maxInsertIndex = 0
	}
}
|
|
||||||
|
|
||||||
func (d *compressor) close() error {
|
|
||||||
if d.err != nil {
|
|
||||||
return d.err
|
|
||||||
}
|
|
||||||
d.sync = true
|
|
||||||
d.step(d)
|
|
||||||
if d.err != nil {
|
|
||||||
return d.err
|
|
||||||
}
|
|
||||||
if d.w.writeStoredHeader(0, true); d.w.err != nil {
|
|
||||||
return d.w.err
|
|
||||||
}
|
|
||||||
d.w.flush()
|
|
||||||
d.w.reset(nil)
|
|
||||||
return d.w.err
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewWriter returns a new Writer compressing data at the given level.
|
|
||||||
// Following zlib, levels range from 1 (BestSpeed) to 9 (BestCompression);
|
|
||||||
// higher levels typically run slower but compress more.
|
|
||||||
// Level 0 (NoCompression) does not attempt any compression; it only adds the
|
|
||||||
// necessary DEFLATE framing.
|
|
||||||
// Level -1 (DefaultCompression) uses the default compression level.
|
|
||||||
// Level -2 (ConstantCompression) will use Huffman compression only, giving
|
|
||||||
// a very fast compression for all types of input, but sacrificing considerable
|
|
||||||
// compression efficiency.
|
|
||||||
//
|
|
||||||
// If level is in the range [-2, 9] then the error returned will be nil.
|
|
||||||
// Otherwise the error returned will be non-nil.
|
|
||||||
func NewWriter(w io.Writer, level int) (*Writer, error) {
|
|
||||||
var dw Writer
|
|
||||||
if err := dw.d.init(w, level); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return &dw, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewWriterDict is like NewWriter but initializes the new
// Writer with a preset dictionary. The returned Writer behaves
// as if the dictionary had been written to it without producing
// any compressed output. The compressed data written to w
// can only be decompressed by a Reader initialized with the
// same dictionary.
func NewWriterDict(w io.Writer, level int, dict []byte) (*Writer, error) {
	zw, err := NewWriter(w, level)
	if err != nil {
		return nil, err
	}
	// Prime the compressor's window with the dictionary contents.
	zw.d.fillWindow(dict)
	zw.dict = append(zw.dict, dict...) // duplicate dictionary for Reset method.
	return zw, err
}
|
|
||||||
|
|
||||||
// A Writer takes data written to it and writes the compressed
// form of that data to an underlying writer (see NewWriter).
type Writer struct {
	d    compressor // the underlying DEFLATE state machine
	dict []byte     // preset dictionary retained so Reset can re-apply it
}
|
|
||||||
|
|
||||||
// Write writes data to w, which will eventually write the
// compressed form of data to its underlying writer.
// It returns the number of bytes consumed and any error from the
// underlying compressor.
func (w *Writer) Write(data []byte) (n int, err error) {
	return w.d.write(data)
}
|
|
||||||
|
|
||||||
// Flush flushes any pending data to the underlying writer.
// It is useful mainly in compressed network protocols, to ensure that
// a remote reader has enough data to reconstruct a packet.
// Flush does not return until the data has been written.
// Calling Flush when there is no pending data still causes the Writer
// to emit a sync marker of at least 4 bytes.
// If the underlying writer returns an error, Flush returns that error.
//
// In the terminology of the zlib library, Flush is equivalent to Z_SYNC_FLUSH.
func (w *Writer) Flush() error {
	// For more about flushing:
	// http://www.bolet.org/~pornin/deflate-flush.html
	return w.d.syncFlush()
}
|
|
||||||
|
|
||||||
// Close flushes and closes the writer, terminating the DEFLATE stream.
// It does not close the underlying io.Writer.
func (w *Writer) Close() error {
	return w.d.close()
}
|
|
||||||
|
|
||||||
// Reset discards the writer's state and makes it equivalent to
|
|
||||||
// the result of NewWriter or NewWriterDict called with dst
|
|
||||||
// and w's level and dictionary.
|
|
||||||
func (w *Writer) Reset(dst io.Writer) {
|
|
||||||
if len(w.dict) > 0 {
|
|
||||||
// w was created with NewWriterDict
|
|
||||||
w.d.reset(dst)
|
|
||||||
if dst != nil {
|
|
||||||
w.d.fillWindow(w.dict)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// w was created with NewWriter
|
|
||||||
w.d.reset(dst)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ResetDict discards the writer's state and makes it equivalent to
// the result of NewWriter or NewWriterDict called with dst
// and w's level, but sets a specific dictionary.
// NOTE(review): unlike NewWriterDict, the caller's dict slice is retained
// without copying — confirm callers do not mutate it afterwards.
func (w *Writer) ResetDict(dst io.Writer, dict []byte) {
	w.dict = dict
	w.d.reset(dst)
	w.d.fillWindow(w.dict)
}
|
|
184
vendor/github.com/klauspost/compress/flate/dict_decoder.go
generated
vendored
184
vendor/github.com/klauspost/compress/flate/dict_decoder.go
generated
vendored
@ -1,184 +0,0 @@
|
|||||||
// Copyright 2016 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package flate
|
|
||||||
|
|
||||||
// dictDecoder implements the LZ77 sliding dictionary as used in decompression.
// LZ77 decompresses data through sequences of two forms of commands:
//
// * Literal insertions: Runs of one or more symbols are inserted into the data
// stream as is. This is accomplished through the writeByte method for a
// single symbol, or combinations of writeSlice/writeMark for multiple symbols.
// Any valid stream must start with a literal insertion if no preset dictionary
// is used.
//
// * Backward copies: Runs of one or more symbols are copied from previously
// emitted data. Backward copies come as the tuple (dist, length) where dist
// determines how far back in the stream to copy from and length determines how
// many bytes to copy. Note that it is valid for the length to be greater than
// the distance. Since LZ77 uses forward copies, that situation is used to
// perform a form of run-length encoding on repeated runs of symbols.
// The writeCopy and tryWriteCopy are used to implement this command.
//
// For performance reasons, this implementation performs little to no sanity
// checks about the arguments. As such, the invariants documented for each
// method call must be respected.
type dictDecoder struct {
	hist []byte // Sliding window history

	// Invariant: 0 <= rdPos <= wrPos <= len(hist)
	wrPos int  // Current output position in buffer
	rdPos int  // Have emitted hist[:rdPos] already
	full  bool // Has a full window length been written yet?
}
|
|
||||||
|
|
||||||
// init initializes dictDecoder to have a sliding window dictionary of the given
// size. If a preset dict is provided, it will initialize the dictionary with
// the contents of dict.
func (dd *dictDecoder) init(size int, dict []byte) {
	// Reset all fields to zero values but keep the backing buffer.
	*dd = dictDecoder{hist: dd.hist}

	if cap(dd.hist) < size {
		dd.hist = make([]byte, size)
	}
	dd.hist = dd.hist[:size]

	// Only the trailing len(hist) bytes of a larger dict are relevant.
	if len(dict) > len(dd.hist) {
		dict = dict[len(dict)-len(dd.hist):]
	}
	dd.wrPos = copy(dd.hist, dict)
	if dd.wrPos == len(dd.hist) {
		// The dictionary filled the window exactly: wrap to the front and
		// mark the full history as valid.
		dd.wrPos = 0
		dd.full = true
	}
	dd.rdPos = dd.wrPos
}
|
|
||||||
|
|
||||||
// histSize reports the total amount of historical data in the dictionary.
|
|
||||||
func (dd *dictDecoder) histSize() int {
|
|
||||||
if dd.full {
|
|
||||||
return len(dd.hist)
|
|
||||||
}
|
|
||||||
return dd.wrPos
|
|
||||||
}
|
|
||||||
|
|
||||||
// availRead reports the number of bytes that can be flushed by readFlush.
func (dd *dictDecoder) availRead() int {
	return dd.wrPos - dd.rdPos
}
|
|
||||||
|
|
||||||
// availWrite reports the available amount of output buffer space, i.e. how
// many bytes can be written before the window must be flushed.
func (dd *dictDecoder) availWrite() int {
	return len(dd.hist) - dd.wrPos
}
|
|
||||||
|
|
||||||
// writeSlice returns a slice of the available buffer to write data to.
// The caller fills (part of) it and then advances with writeMark.
//
// This invariant will be kept: len(s) <= availWrite()
func (dd *dictDecoder) writeSlice() []byte {
	return dd.hist[dd.wrPos:]
}
|
|
||||||
|
|
||||||
// writeMark advances the writer pointer by cnt, committing bytes that were
// written into the slice returned by writeSlice.
//
// This invariant must be kept: 0 <= cnt <= availWrite()
func (dd *dictDecoder) writeMark(cnt int) {
	dd.wrPos += cnt
}
|
|
||||||
|
|
||||||
// writeByte writes a single byte to the dictionary.
//
// This invariant must be kept: 0 < availWrite()
func (dd *dictDecoder) writeByte(c byte) {
	dd.hist[dd.wrPos] = c
	dd.wrPos++
}
|
|
||||||
|
|
||||||
// writeCopy copies a string at a given (dist, length) to the output.
// This returns the number of bytes copied and may be less than the requested
// length if the available space in the output buffer is too small.
//
// This invariant must be kept: 0 < dist <= histSize()
func (dd *dictDecoder) writeCopy(dist, length int) int {
	dstBase := dd.wrPos
	dstPos := dstBase
	srcPos := dstPos - dist
	endPos := dstPos + length
	// Clamp to the window; the caller retries with the remainder after a flush.
	if endPos > len(dd.hist) {
		endPos = len(dd.hist)
	}

	// Copy non-overlapping section after destination position.
	//
	// This section is non-overlapping in that the copy length for this section
	// is always less than or equal to the backwards distance. This can occur
	// if a distance refers to data that wraps-around in the buffer.
	// Thus, a backwards copy is performed here; that is, the exact bytes in
	// the source prior to the copy is placed in the destination.
	if srcPos < 0 {
		srcPos += len(dd.hist)
		dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:])
		srcPos = 0
	}

	// Copy possibly overlapping section before destination position.
	//
	// This section can overlap if the copy length for this section is larger
	// than the backwards distance. This is allowed by LZ77 so that repeated
	// strings can be succinctly represented using (dist, length) pairs.
	// Thus, a forwards copy is performed here; that is, the bytes copied is
	// possibly dependent on the resulting bytes in the destination as the copy
	// progresses along. This is functionally equivalent to the following:
	//
	//	for i := 0; i < endPos-dstPos; i++ {
	//		dd.hist[dstPos+i] = dd.hist[srcPos+i]
	//	}
	//	dstPos = endPos
	//
	for dstPos < endPos {
		dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos])
	}

	dd.wrPos = dstPos
	return dstPos - dstBase
}
|
|
||||||
|
|
||||||
// tryWriteCopy tries to copy a string at a given (distance, length) to the
// output. This specialized version is optimized for short distances.
// It returns 0 (copied nothing) when the copy would wrap around the window
// or exceed the remaining space; the caller must then fall back to writeCopy.
//
// This method is designed to be inlined for performance reasons.
//
// This invariant must be kept: 0 < dist <= histSize()
func (dd *dictDecoder) tryWriteCopy(dist, length int) int {
	dstPos := dd.wrPos
	endPos := dstPos + length
	if dstPos < dist || endPos > len(dd.hist) {
		return 0
	}
	dstBase := dstPos
	srcPos := dstPos - dist

	// Copy possibly overlapping section before destination position.
loop:
	dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos])
	if dstPos < endPos {
		goto loop // Avoid for-loop so that this function can be inlined
	}

	dd.wrPos = dstPos
	return dstPos - dstBase
}
|
|
||||||
|
|
||||||
// readFlush returns a slice of the historical buffer that is ready to be
|
|
||||||
// emitted to the user. The data returned by readFlush must be fully consumed
|
|
||||||
// before calling any other dictDecoder methods.
|
|
||||||
func (dd *dictDecoder) readFlush() []byte {
|
|
||||||
toRead := dd.hist[dd.rdPos:dd.wrPos]
|
|
||||||
dd.rdPos = dd.wrPos
|
|
||||||
if dd.wrPos == len(dd.hist) {
|
|
||||||
dd.wrPos, dd.rdPos = 0, 0
|
|
||||||
dd.full = true
|
|
||||||
}
|
|
||||||
return toRead
|
|
||||||
}
|
|
254
vendor/github.com/klauspost/compress/flate/fast_encoder.go
generated
vendored
254
vendor/github.com/klauspost/compress/flate/fast_encoder.go
generated
vendored
@ -1,254 +0,0 @@
|
|||||||
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
|
|
||||||
// Modified for deflate by Klaus Post (c) 2015.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package flate
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"math/bits"
|
|
||||||
)
|
|
||||||
|
|
||||||
// fastEnc is the interface implemented by the table-based encoders used
// for compression levels 1-6.
type fastEnc interface {
	Encode(dst *tokens, src []byte) // tokenize src, appending to dst
	Reset()                         // invalidate all previous history
}
|
|
||||||
|
|
||||||
// newFastEnc returns the fast encoder implementation for levels 1-6.
// It panics for any other level; callers are expected to have validated
// the level beforehand.
func newFastEnc(level int) fastEnc {
	switch level {
	case 1:
		return &fastEncL1{fastGen: fastGen{cur: maxStoreBlockSize}}
	case 2:
		return &fastEncL2{fastGen: fastGen{cur: maxStoreBlockSize}}
	case 3:
		return &fastEncL3{fastGen: fastGen{cur: maxStoreBlockSize}}
	case 4:
		return &fastEncL4{fastGen: fastGen{cur: maxStoreBlockSize}}
	case 5:
		return &fastEncL5{fastGen: fastGen{cur: maxStoreBlockSize}}
	case 6:
		return &fastEncL6{fastGen: fastGen{cur: maxStoreBlockSize}}
	default:
		panic("invalid level specified")
	}
}
|
|
||||||
|
|
||||||
// Table sizing and match-range constants shared by the fast encoders.
const (
	tableBits  = 15             // Bits used in the table
	tableSize  = 1 << tableBits // Size of the table
	tableShift = 32 - tableBits // Right-shift to get the tableBits most significant bits of a uint32.

	baseMatchOffset = 1       // The smallest match offset
	baseMatchLength = 3       // The smallest match length per the RFC section 3.2.5
	maxMatchOffset  = 1 << 15 // The largest match offset

	bTableBits   = 17                                             // Bits used in the big tables
	bTableSize   = 1 << bTableBits                                // Size of the table
	allocHistory = maxStoreBlockSize * 10                         // Size to preallocate for history.
	bufferReset  = (1 << 31) - allocHistory - maxStoreBlockSize - 1 // Reset the buffer offset when reaching this.
)
|
|
||||||
|
|
||||||
// Multiplicative-hash primes, one per input width (3-8 bytes).
const (
	prime3bytes = 506832829
	prime4bytes = 2654435761
	prime5bytes = 889523592379
	prime6bytes = 227718039650203
	prime7bytes = 58295818150454627
	prime8bytes = 0xcf1bbcdcb7a56463
)
|
|
||||||
|
|
||||||
// load32 reads a little-endian uint32 from b starting at byte offset i.
func load32(b []byte, i int) uint32 {
	// The full-slice re-slice lets the compiler prove all four reads are
	// in bounds, collapsing them into a single load.
	v := b[i : i+4 : i+4]
	return uint32(v[0]) | uint32(v[1])<<8 | uint32(v[2])<<16 | uint32(v[3])<<24
}
|
|
||||||
|
|
||||||
// load64 reads a little-endian uint64 from b starting at byte offset i.
func load64(b []byte, i int) uint64 {
	// The full-slice re-slice lets the compiler prove all eight reads are
	// in bounds, collapsing them into a single load.
	v := b[i : i+8 : i+8]
	return uint64(v[0]) | uint64(v[1])<<8 | uint64(v[2])<<16 | uint64(v[3])<<24 |
		uint64(v[4])<<32 | uint64(v[5])<<40 | uint64(v[6])<<48 | uint64(v[7])<<56
}
|
|
||||||
|
|
||||||
// load3232 reads a little-endian uint32 from b at offset i (int32 index
// variant used by the fast encoders).
func load3232(b []byte, i int32) uint32 {
	// Full-slice re-slice: one bounds check covers all four byte reads.
	v := b[i : i+4 : i+4]
	return uint32(v[0]) | uint32(v[1])<<8 | uint32(v[2])<<16 | uint32(v[3])<<24
}
|
|
||||||
|
|
||||||
// load6432 reads a little-endian uint64 from b at offset i (int32 index
// variant used by the fast encoders).
func load6432(b []byte, i int32) uint64 {
	// Full-slice re-slice: one bounds check covers all eight byte reads.
	v := b[i : i+8 : i+8]
	return uint64(v[0]) | uint64(v[1])<<8 | uint64(v[2])<<16 | uint64(v[3])<<24 |
		uint64(v[4])<<32 | uint64(v[5])<<40 | uint64(v[6])<<48 | uint64(v[7])<<56
}
|
|
||||||
|
|
||||||
// hash maps u to a tableBits-wide table index via multiplicative hashing
// (Knuth-style constant, keeping the top tableBits bits).
func hash(u uint32) uint32 {
	return (u * 0x1e35a7bd) >> tableShift
}
|
|
||||||
|
|
||||||
// tableEntry is a single hash-table slot holding the absolute offset
// (relative to fastGen.cur) of a previously seen position.
type tableEntry struct {
	offset int32
}
|
|
||||||
|
|
||||||
// fastGen maintains the table for matches,
// and the previous byte block for level 2.
// This is the generic implementation.
type fastGen struct {
	hist []byte // concatenated history of previously seen input
	cur  int32  // offset of hist[0] in the global coordinate space
}
|
|
||||||
|
|
||||||
// addBlock appends src to the encoder history and returns the offset of
// the new data within e.hist. When the history buffer would overflow, the
// most recent maxMatchOffset bytes are slid to the front and e.cur is
// advanced so previously stored table offsets remain valid.
func (e *fastGen) addBlock(src []byte) int32 {
	// check if we have space already
	if len(e.hist)+len(src) > cap(e.hist) {
		if cap(e.hist) == 0 {
			e.hist = make([]byte, 0, allocHistory)
		} else {
			if cap(e.hist) < maxMatchOffset*2 {
				panic("unexpected buffer size")
			}
			// Move down
			offset := int32(len(e.hist)) - maxMatchOffset
			copy(e.hist[0:maxMatchOffset], e.hist[offset:])
			e.cur += offset
			e.hist = e.hist[:maxMatchOffset]
		}
	}
	s := int32(len(e.hist))
	e.hist = append(e.hist, src...)
	return s
}
|
|
||||||
|
|
||||||
// hash4u returns the hash of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <32.
func hash4u(u uint32, h uint8) uint32 {
	// Masking the shift with 31 lets the compiler elide the runtime
	// shift-range check.
	return (u * prime4bytes) >> ((32 - h) & 31)
}
|
|
||||||
|
|
||||||
// tableEntryPrev holds the two most recent positions for a hash bucket,
// giving the level-2+ encoders one extra match candidate per lookup.
type tableEntryPrev struct {
	Cur  tableEntry
	Prev tableEntry
}
|
|
||||||
|
|
||||||
// hash4x64 returns the hash of the lowest 4 bytes of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <32.
func hash4x64(u uint64, h uint8) uint32 {
	return (uint32(u) * prime4bytes) >> ((32 - h) & 31)
}
|
|
||||||
|
|
||||||
// hash7 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <64.
func hash7(u uint64, h uint8) uint32 {
	// The pre-shift discards the top byte so only 7 bytes contribute.
	return uint32(((u << (64 - 56)) * prime7bytes) >> ((64 - h) & 63))
}
|
|
||||||
|
|
||||||
// hash8 returns the hash of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <64.
func hash8(u uint64, h uint8) uint32 {
	return uint32((u * prime8bytes) >> ((64 - h) & 63))
}
|
|
||||||
|
|
||||||
// hash6 returns the hash of the lowest 6 bytes of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <64.
func hash6(u uint64, h uint8) uint32 {
	// The pre-shift discards the top two bytes so only 6 bytes contribute.
	return uint32(((u << (64 - 48)) * prime6bytes) >> ((64 - h) & 63))
}
|
|
||||||
|
|
||||||
// matchlen will return the match length between offsets s and t in src.
// The maximum length returned is maxMatchLength - 4.
// It is assumed that s > t, that t >=0 and s < len(src).
func (e *fastGen) matchlen(s, t int32, src []byte) int32 {
	// Invariant checks are only active in debug builds.
	if debugDecode {
		if t >= s {
			panic(fmt.Sprint("t >=s:", t, s))
		}
		if int(s) >= len(src) {
			panic(fmt.Sprint("s >= len(src):", s, len(src)))
		}
		if t < 0 {
			panic(fmt.Sprint("t < 0:", t))
		}
		if s-t > maxMatchOffset {
			panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")"))
		}
	}
	// Cap the comparison window so a single match never exceeds the
	// DEFLATE maximum match length.
	s1 := int(s) + maxMatchLength - 4
	if s1 > len(src) {
		s1 = len(src)
	}

	// Extend the match to be as long as possible.
	return int32(matchLen(src[s:s1], src[t:]))
}
|
|
||||||
|
|
||||||
// matchlenLong will return the match length between offsets s and t in src,
// without capping at maxMatchLength (used for cross-block extension).
// It is assumed that s > t, that t >=0 and s < len(src).
func (e *fastGen) matchlenLong(s, t int32, src []byte) int32 {
	// Invariant checks are only active in debug builds.
	if debugDecode {
		if t >= s {
			panic(fmt.Sprint("t >=s:", t, s))
		}
		if int(s) >= len(src) {
			panic(fmt.Sprint("s >= len(src):", s, len(src)))
		}
		if t < 0 {
			panic(fmt.Sprint("t < 0:", t))
		}
		if s-t > maxMatchOffset {
			panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")"))
		}
	}
	// Extend the match to be as long as possible.
	return int32(matchLen(src[s:], src[t:]))
}
|
|
||||||
|
|
||||||
// Reset the encoding table.
func (e *fastGen) Reset() {
	if cap(e.hist) < allocHistory {
		e.hist = make([]byte, 0, allocHistory)
	}
	// We offset current position so everything will be out of reach.
	// If we are above the buffer reset it will be cleared anyway since len(hist) == 0.
	if e.cur <= bufferReset {
		e.cur += maxMatchOffset + int32(len(e.hist))
	}
	e.hist = e.hist[:0]
}
|
|
||||||
|
|
||||||
// matchLen returns the maximum length.
|
|
||||||
// 'a' must be the shortest of the two.
|
|
||||||
func matchLen(a, b []byte) int {
|
|
||||||
b = b[:len(a)]
|
|
||||||
var checked int
|
|
||||||
if len(a) > 4 {
|
|
||||||
// Try 4 bytes first
|
|
||||||
if diff := load32(a, 0) ^ load32(b, 0); diff != 0 {
|
|
||||||
return bits.TrailingZeros32(diff) >> 3
|
|
||||||
}
|
|
||||||
// Switch to 8 byte matching.
|
|
||||||
checked = 4
|
|
||||||
a = a[4:]
|
|
||||||
b = b[4:]
|
|
||||||
for len(a) >= 8 {
|
|
||||||
b = b[:len(a)]
|
|
||||||
if diff := load64(a, 0) ^ load64(b, 0); diff != 0 {
|
|
||||||
return checked + (bits.TrailingZeros64(diff) >> 3)
|
|
||||||
}
|
|
||||||
checked += 8
|
|
||||||
a = a[8:]
|
|
||||||
b = b[8:]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
b = b[:len(a)]
|
|
||||||
for i := range a {
|
|
||||||
if a[i] != b[i] {
|
|
||||||
return int(i) + checked
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return len(a) + checked
|
|
||||||
}
|
|
274
vendor/github.com/klauspost/compress/flate/gen_inflate.go
generated
vendored
274
vendor/github.com/klauspost/compress/flate/gen_inflate.go
generated
vendored
@ -1,274 +0,0 @@
|
|||||||
// +build generate
|
|
||||||
|
|
||||||
//go:generate go run $GOFILE && gofmt -w inflate_gen.go
|
|
||||||
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"os"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// main generates inflate_gen.go: one specialized copy of the Huffman
// block decoder per concrete reader type (*bytes.Buffer, *bytes.Reader,
// *bufio.Reader, *strings.Reader), plus huffmanBlockDecoder, a dispatcher
// that picks the right specialization from the dynamic type of f.r.
// The go:generate directive runs `gofmt -w` on the output afterwards, so
// the exact whitespace emitted here does not need to be canonical — but it
// must at least parse. Write errors on f are intentionally ignored: this
// is a build-time generator, and a truncated file fails the gofmt step.
func main() {
	f, err := os.Create("inflate_gen.go")
	if err != nil {
		panic(err)
	}
	defer f.Close()
	// types[i] is the asserted reader type; names[i] is the matching
	// method-name suffix (huffman<Name>).
	types := []string{"*bytes.Buffer", "*bytes.Reader", "*bufio.Reader", "*strings.Reader"}
	names := []string{"BytesBuffer", "BytesReader", "BufioReader", "StringsReader"}
	imports := []string{"bytes", "bufio", "io", "strings", "math/bits"}
	f.WriteString(`// Code generated by go generate gen_inflate.go. DO NOT EDIT.

package flate

import (
`)

	for _, imp := range imports {
		f.WriteString("\t\"" + imp + "\"\n")
	}
	f.WriteString(")\n\n")

	// Decoder body with two placeholders: $FUNCNAME$ (method name) and
	// $TYPE$ (concrete type asserted from f.r).
	template := `

// Decode a single Huffman block from f.
// hl and hd are the Huffman states for the lit/length values
// and the distance values, respectively. If hd == nil, using the
// fixed distance encoding associated with fixed Huffman blocks.
func (f *decompressor) $FUNCNAME$() {
	const (
		stateInit = iota // Zero value must be stateInit
		stateDict
	)
	fr := f.r.($TYPE$)
	moreBits := func() error {
		c, err := fr.ReadByte()
		if err != nil {
			return noEOF(err)
		}
		f.roffset++
		f.b |= uint32(c) << f.nb
		f.nb += 8
		return nil
	}

	switch f.stepState {
	case stateInit:
		goto readLiteral
	case stateDict:
		goto copyHistory
	}

readLiteral:
	// Read literal and/or (length, distance) according to RFC section 3.2.3.
	{
		var v int
		{
			// Inlined v, err := f.huffSym(f.hl)
			// Since a huffmanDecoder can be empty or be composed of a degenerate tree
			// with single element, huffSym must error on these two edge cases. In both
			// cases, the chunks slice will be 0 for the invalid sequence, leading it
			// satisfy the n == 0 check below.
			n := uint(f.hl.maxRead)
			// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
			// but is smart enough to keep local variables in registers, so use nb and b,
			// inline call to moreBits and reassign b,nb back to f on return.
			nb, b := f.nb, f.b
			for {
				for nb < n {
					c, err := fr.ReadByte()
					if err != nil {
						f.b = b
						f.nb = nb
						f.err = noEOF(err)
						return
					}
					f.roffset++
					b |= uint32(c) << (nb & 31)
					nb += 8
				}
				chunk := f.hl.chunks[b&(huffmanNumChunks-1)]
				n = uint(chunk & huffmanCountMask)
				if n > huffmanChunkBits {
					chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask]
					n = uint(chunk & huffmanCountMask)
				}
				if n <= nb {
					if n == 0 {
						f.b = b
						f.nb = nb
						if debugDecode {
							fmt.Println("huffsym: n==0")
						}
						f.err = CorruptInputError(f.roffset)
						return
					}
					f.b = b >> (n & 31)
					f.nb = nb - n
					v = int(chunk >> huffmanValueShift)
					break
				}
			}
		}

		var n uint // number of bits extra
		var length int
		var err error
		switch {
		case v < 256:
			f.dict.writeByte(byte(v))
			if f.dict.availWrite() == 0 {
				f.toRead = f.dict.readFlush()
				f.step = (*decompressor).$FUNCNAME$
				f.stepState = stateInit
				return
			}
			goto readLiteral
		case v == 256:
			f.finishBlock()
			return
		// otherwise, reference to older data
		case v < 265:
			length = v - (257 - 3)
			n = 0
		case v < 269:
			length = v*2 - (265*2 - 11)
			n = 1
		case v < 273:
			length = v*4 - (269*4 - 19)
			n = 2
		case v < 277:
			length = v*8 - (273*8 - 35)
			n = 3
		case v < 281:
			length = v*16 - (277*16 - 67)
			n = 4
		case v < 285:
			length = v*32 - (281*32 - 131)
			n = 5
		case v < maxNumLit:
			length = 258
			n = 0
		default:
			if debugDecode {
				fmt.Println(v, ">= maxNumLit")
			}
			f.err = CorruptInputError(f.roffset)
			return
		}
		if n > 0 {
			for f.nb < n {
				if err = moreBits(); err != nil {
					if debugDecode {
						fmt.Println("morebits n>0:", err)
					}
					f.err = err
					return
				}
			}
			length += int(f.b & uint32(1<<n-1))
			f.b >>= n
			f.nb -= n
		}

		var dist int
		if f.hd == nil {
			for f.nb < 5 {
				if err = moreBits(); err != nil {
					if debugDecode {
						fmt.Println("morebits f.nb<5:", err)
					}
					f.err = err
					return
				}
			}
			dist = int(bits.Reverse8(uint8(f.b & 0x1F << 3)))
			f.b >>= 5
			f.nb -= 5
		} else {
			if dist, err = f.huffSym(f.hd); err != nil {
				if debugDecode {
					fmt.Println("huffsym:", err)
				}
				f.err = err
				return
			}
		}

		switch {
		case dist < 4:
			dist++
		case dist < maxNumDist:
			nb := uint(dist-2) >> 1
			// have 1 bit in bottom of dist, need nb more.
			extra := (dist & 1) << nb
			for f.nb < nb {
				if err = moreBits(); err != nil {
					if debugDecode {
						fmt.Println("morebits f.nb<nb:", err)
					}
					f.err = err
					return
				}
			}
			extra |= int(f.b & uint32(1<<nb-1))
			f.b >>= nb
			f.nb -= nb
			dist = 1<<(nb+1) + 1 + extra
		default:
			if debugDecode {
				fmt.Println("dist too big:", dist, maxNumDist)
			}
			f.err = CorruptInputError(f.roffset)
			return
		}

		// No check on length; encoding can be prescient.
		if dist > f.dict.histSize() {
			if debugDecode {
				fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize())
			}
			f.err = CorruptInputError(f.roffset)
			return
		}

		f.copyLen, f.copyDist = length, dist
		goto copyHistory
	}

copyHistory:
	// Perform a backwards copy according to RFC section 3.2.3.
	{
		cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen)
		if cnt == 0 {
			cnt = f.dict.writeCopy(f.copyDist, f.copyLen)
		}
		f.copyLen -= cnt

		if f.dict.availWrite() == 0 || f.copyLen > 0 {
			f.toRead = f.dict.readFlush()
			f.step = (*decompressor).$FUNCNAME$ // We need to continue this work
			f.stepState = stateDict
			return
		}
		goto readLiteral
	}
}

`
	// Emit one decoder specialization per reader type.
	for i, t := range types {
		s := strings.Replace(template, "$FUNCNAME$", "huffman"+names[i], -1)
		s = strings.Replace(s, "$TYPE$", t, -1)
		f.WriteString(s)
	}
	// Emit the dispatcher selecting a specialization by f.r's dynamic
	// type, falling back to the generic decoder.
	f.WriteString("func (f *decompressor) huffmanBlockDecoder() func() {\n")
	f.WriteString("\tswitch f.r.(type) {\n")
	for i, t := range types {
		f.WriteString("\t\tcase " + t + ":\n")
		f.WriteString("\t\t\treturn f.huffman" + names[i] + "\n")
	}
	f.WriteString("\t\tdefault:\n")
	// Fix: the return statement must be newline-terminated; without it the
	// closing brace written next lands on the same line and the generated
	// file does not parse, so the subsequent `gofmt -w` step fails.
	f.WriteString("\t\t\treturn f.huffmanBlockGeneric\n")
	f.WriteString("\t}\n}\n")
}
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
x
Reference in New Issue
Block a user