diff --git a/Makefile b/Makefile
index 92867611d7..abc9c515b9 100644
--- a/Makefile
+++ b/Makefile
@@ -19,6 +19,7 @@ REPO_INFRA_VERSION = v0.2.5
 KUSTOMIZE_VERSION = 5.2.1
 OPERATOR_SDK_VERSION ?= v1.25.0
 ZEITGEIST_VERSION = v0.5.3
+MDTOC_VERSION = v1.4.0
 
 CI_IMAGE ?= golang:1.22
 CONTROLLER_GEN_CMD := CGO_LDFLAGS= $(GO) run $(BUILD_FLAGS) -tags generate sigs.k8s.io/controller-tools/cmd/controller-gen
@@ -317,11 +318,13 @@ vagrant-up-flatcar: ## Boot the Vagrant Flatcar based test VM
 	$(call vagrant-up,flatcar,build)
 
 $(BUILD_DIR)/mdtoc: $(BUILD_DIR)
-	$(call go-build,./vendor/sigs.k8s.io/mdtoc)
+	curl -sSfL -o $(BUILD_DIR)/mdtoc \
+		https://storage.googleapis.com/k8s-artifacts-sig-release/kubernetes-sigs/mdtoc/$(MDTOC_VERSION)/mdtoc-$(ARCH)-$(OS)
+	chmod +x $(BUILD_DIR)/mdtoc
 
 .PHONY: update-toc
 update-toc: $(BUILD_DIR)/mdtoc ## Update the table of contents for the documentation
-	$(BUILD_DIR)/mdtoc --inplace installation-usage.md
+	git grep --name-only '<!-- toc -->' | grep -v Makefile | xargs $(BUILD_DIR)/mdtoc -i
 
 $(BUILD_DIR)/recorder.bpf.o: $(BUILD_DIR) ## Build the BPF module
 	$(CLANG) -g -O2 \
diff --git a/README.md b/README.md
index c0ae410f81..4f30944978 100644
--- a/README.md
+++ b/README.md
@@ -6,6 +6,15 @@
 [![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/5368/badge)](https://bestpractices.coreinfrastructure.org/projects/5368)
 [![OCI security profiles](https://img.shields.io/badge/oci%3A%2F%2F-security%20profiles-blue?logo=kubernetes&logoColor=white)](https://github.com/orgs/security-profiles/packages)
 
+<!-- toc -->
+- [About](#about)
+- [Features](#features)
+- [Personas & User Stories](#personas--user-stories)
+- [Roadmap](#roadmap)
+- [Community, discussion, contribution, and support](#community-discussion-contribution-and-support)
+  - [Code of conduct](#code-of-conduct)
+<!-- /toc -->
+
 This project is the starting point for the _Security Profiles Operator_ (SPO), an out-of-tree Kubernetes enhancement which aims to make it easier for users to use SELinux, seccomp and AppArmor in Kubernetes clusters.
diff --git a/dependencies.yaml b/dependencies.yaml index 1a7c8ae39b..68ebadcdda 100644 --- a/dependencies.yaml +++ b/dependencies.yaml @@ -236,6 +236,12 @@ dependencies: - path: Makefile match: ZEITGEIST_VERSION + - name: mdtoc + version: v1.4.0 + refPaths: + - path: Makefile + match: MDTOC_VERSION + - name: yq version: 4.35.2 refPaths: diff --git a/go.mod b/go.mod index 98209c6ad4..e6a4504709 100644 --- a/go.mod +++ b/go.mod @@ -42,7 +42,6 @@ require ( oras.land/oras-go/v2 v2.4.0 sigs.k8s.io/controller-runtime v0.17.3 sigs.k8s.io/controller-tools v0.14.0 - sigs.k8s.io/mdtoc v1.3.0 sigs.k8s.io/release-utils v0.8.1 sigs.k8s.io/yaml v1.4.0 ) @@ -63,7 +62,6 @@ require ( github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect github.com/Azure/go-autorest/logger v0.2.1 // indirect github.com/Azure/go-autorest/tracing v0.6.0 // indirect - github.com/BurntSushi/toml v1.3.2 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect github.com/OneOfOne/xxhash v1.2.8 // indirect github.com/ProtonMail/go-crypto v0.0.0-20230923063757-afb1ddc0824c // indirect @@ -153,7 +151,6 @@ require ( github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/golang/snappy v0.0.4 // indirect - github.com/gomarkdown/markdown v0.0.0-20240328165702-4d01890c35c0 // indirect github.com/google/certificate-transparency-go v1.1.8 // indirect github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 // indirect github.com/google/go-github/v55 v55.0.0 // indirect @@ -183,7 +180,6 @@ require ( github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/go-wordwrap v1.0.1 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect - github.com/mmarkdown/mmark v2.0.40+incompatible // indirect github.com/moby/sys/mountinfo v0.7.1 // indirect github.com/moby/term v0.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect diff --git a/go.sum b/go.sum index 0921ea6450..f2a0ce4a21 100644 --- a/go.sum +++ b/go.sum @@ -56,8 +56,6 @@ github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBp github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU= github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8= -github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= github.com/OneOfOne/xxhash v1.2.8 h1:31czK/TI9sNkxIKfaUfGlU47BAxQ0ztGgd9vPyqimf8= @@ -366,8 +364,6 @@ github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/gomarkdown/markdown v0.0.0-20240328165702-4d01890c35c0 h1:4gjrh/PN2MuWCCElk8/I4OCKRKWCCo2zEct3VKCbibU= -github.com/gomarkdown/markdown v0.0.0-20240328165702-4d01890c35c0/go.mod h1:JDGcbDT52eL4fju3sZ4TeHGsQwhG9nbDV21aMyhwPoA= github.com/google/certificate-transparency-go v1.1.8 
h1:LGYKkgZF7satzgTak9R4yzfJXEeYVAjV6/EAEJOf1to= github.com/google/certificate-transparency-go v1.1.8/go.mod h1:bV/o8r0TBKRf1X//iiiSgWrvII4d7/8OiA+3vG26gI8= github.com/google/flatbuffers v2.0.8+incompatible h1:ivUb1cGomAB101ZM1T0nOiWz9pSrTMoa9+EiY7igmkM= @@ -508,8 +504,6 @@ github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQ github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mmarkdown/mmark v2.0.40+incompatible h1:vMeUeDzBK3H+/mU0oMVfMuhSXJlIA+DE/DMPQNAj5C4= -github.com/mmarkdown/mmark v2.0.40+incompatible/go.mod h1:Uvmoz7tvsWpr7bMVxIpqZPyN3FbOtzDmnsJDFp7ltJs= github.com/moby/sys/mountinfo v0.7.1 h1:/tTvQaSJRr2FshkhXiIpux6fQ2Zvc4j7tAhMTStAG2g= github.com/moby/sys/mountinfo v0.7.1/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI= github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= @@ -1001,8 +995,6 @@ sigs.k8s.io/gateway-api v1.0.0 h1:iPTStSv41+d9p0xFydll6d7f7MOBGuqXM6p2/zVYMAs= sigs.k8s.io/gateway-api v1.0.0/go.mod h1:4cUgr0Lnp5FZ0Cdq8FdRwCvpiWws7LVhLHGIudLlf4c= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/mdtoc v1.3.0 h1:iMJLfMax45vMl3rnwLjEhZ38TS6JqLXSpm0uDNo/zyo= -sigs.k8s.io/mdtoc v1.3.0/go.mod h1:8zLWymqzP8oKMm+1m1e5GKnGZq8gbC1MreKfmRelMQA= sigs.k8s.io/release-utils v0.8.1 h1:qSA9p3vZzO6RAq7zvzupCZjR29+n3NK9DSJPe9bSf7w= sigs.k8s.io/release-utils v0.8.1/go.mod h1:vrQ3eR1VmudgX4OUwr4pUZEkYLRms9bdbv06mr3kchQ= sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= diff --git a/internal/pkg/tools/tools.go b/internal/pkg/tools/tools.go index 45209b1a11..974d77c6fd 100644 --- a/internal/pkg/tools/tools.go +++ b/internal/pkg/tools/tools.go @@ -28,5 +28,4 @@ import ( _ "google.golang.org/grpc/cmd/protoc-gen-go-grpc" _ "google.golang.org/protobuf/cmd/protoc-gen-go" _ "sigs.k8s.io/controller-tools/cmd/controller-gen" - _ "sigs.k8s.io/mdtoc" ) diff --git a/vendor/github.com/BurntSushi/toml/.gitignore b/vendor/github.com/BurntSushi/toml/.gitignore deleted file mode 100644 index fe79e3adda..0000000000 --- a/vendor/github.com/BurntSushi/toml/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -/toml.test -/toml-test diff --git a/vendor/github.com/BurntSushi/toml/COPYING b/vendor/github.com/BurntSushi/toml/COPYING deleted file mode 100644 index 01b5743200..0000000000 --- a/vendor/github.com/BurntSushi/toml/COPYING +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2013 TOML authors - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. 
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
diff --git a/vendor/github.com/BurntSushi/toml/README.md b/vendor/github.com/BurntSushi/toml/README.md
deleted file mode 100644
index 3651cfa960..0000000000
--- a/vendor/github.com/BurntSushi/toml/README.md
+++ /dev/null
@@ -1,120 +0,0 @@
-TOML stands for Tom's Obvious, Minimal Language. This Go package provides a
-reflection interface similar to Go's standard library `json` and `xml` packages.
-
-Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0).
-
-Documentation: https://godocs.io/github.com/BurntSushi/toml
-
-See the [releases page](https://github.com/BurntSushi/toml/releases) for a
-changelog; this information is also in the git tag annotations (e.g. `git show
-v0.4.0`).
-
-This library requires Go 1.13 or newer; add it to your go.mod with:
-
-    % go get github.com/BurntSushi/toml@latest
-
-It also comes with a TOML validator CLI tool:
-
-    % go install github.com/BurntSushi/toml/cmd/tomlv@latest
-    % tomlv some-toml-file.toml
-
-### Examples
-For the simplest example, consider some TOML file as just a list of keys and
-values:
-
-```toml
-Age = 25
-Cats = [ "Cauchy", "Plato" ]
-Pi = 3.14
-Perfection = [ 6, 28, 496, 8128 ]
-DOB = 1987-07-05T05:45:00Z
-```
-
-Which can be decoded with:
-
-```go
-type Config struct {
-	Age        int
-	Cats       []string
-	Pi         float64
-	Perfection []int
-	DOB        time.Time
-}
-
-var conf Config
-_, err := toml.Decode(tomlData, &conf)
-```
-
-You can also use struct tags if your struct field name doesn't map to a TOML
-key value directly:
-
-```toml
-some_key_NAME = "wat"
-```
-
-```go
-type TOML struct {
-	ObscureKey string `toml:"some_key_NAME"`
-}
-```
-
-Beware that like other decoders **only exported fields** are considered when
-encoding and decoding; private fields are silently ignored.
-
-### Using the `Marshaler` and `encoding.TextUnmarshaler` interfaces
-Here's an example that automatically parses values in a `mail.Address`:
-
-```toml
-contacts = [
-    "Donald Duck <donald@duckburg.com>",
-    "Scrooge McDuck <scrooge@duckburg.com>",
-]
-```
-
-Can be decoded with:
-
-```go
-// Create address type which satisfies the encoding.TextUnmarshaler interface.
-type address struct {
-	*mail.Address
-}
-
-func (a *address) UnmarshalText(text []byte) error {
-	var err error
-	a.Address, err = mail.ParseAddress(string(text))
-	return err
-}
-
-// Decode it.
-func decode() {
-	blob := `
-		contacts = [
-			"Donald Duck <donald@duckburg.com>",
-			"Scrooge McDuck <scrooge@duckburg.com>",
-		]
-	`
-
-	var contacts struct {
-		Contacts []address
-	}
-
-	_, err := toml.Decode(blob, &contacts)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	for _, c := range contacts.Contacts {
-		fmt.Printf("%#v\n", c.Address)
-	}
-
-	// Output:
-	// &mail.Address{Name:"Donald Duck", Address:"donald@duckburg.com"}
-	// &mail.Address{Name:"Scrooge McDuck", Address:"scrooge@duckburg.com"}
-}
-```
-
-To target TOML specifically you can implement `UnmarshalTOML` TOML interface in
-a similar way.
-
-### More complex usage
-See the [`_example/`](/_example) directory for a more complex example.
diff --git a/vendor/github.com/BurntSushi/toml/decode.go b/vendor/github.com/BurntSushi/toml/decode.go deleted file mode 100644 index 4d38f3bfce..0000000000 --- a/vendor/github.com/BurntSushi/toml/decode.go +++ /dev/null @@ -1,602 +0,0 @@ -package toml - -import ( - "bytes" - "encoding" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "math" - "os" - "reflect" - "strconv" - "strings" - "time" -) - -// Unmarshaler is the interface implemented by objects that can unmarshal a -// TOML description of themselves. -type Unmarshaler interface { - UnmarshalTOML(interface{}) error -} - -// Unmarshal decodes the contents of data in TOML format into a pointer v. -// -// See [Decoder] for a description of the decoding process. -func Unmarshal(data []byte, v interface{}) error { - _, err := NewDecoder(bytes.NewReader(data)).Decode(v) - return err -} - -// Decode the TOML data in to the pointer v. -// -// See [Decoder] for a description of the decoding process. -func Decode(data string, v interface{}) (MetaData, error) { - return NewDecoder(strings.NewReader(data)).Decode(v) -} - -// DecodeFile reads the contents of a file and decodes it with [Decode]. -func DecodeFile(path string, v interface{}) (MetaData, error) { - fp, err := os.Open(path) - if err != nil { - return MetaData{}, err - } - defer fp.Close() - return NewDecoder(fp).Decode(v) -} - -// Primitive is a TOML value that hasn't been decoded into a Go value. -// -// This type can be used for any value, which will cause decoding to be delayed. -// You can use [PrimitiveDecode] to "manually" decode these values. -// -// NOTE: The underlying representation of a `Primitive` value is subject to -// change. Do not rely on it. -// -// NOTE: Primitive values are still parsed, so using them will only avoid the -// overhead of reflection. They can be useful when you don't know the exact type -// of TOML data until runtime. -type Primitive struct { - undecoded interface{} - context Key -} - -// The significand precision for float32 and float64 is 24 and 53 bits; this is -// the range a natural number can be stored in a float without loss of data. -const ( - maxSafeFloat32Int = 16777215 // 2^24-1 - maxSafeFloat64Int = int64(9007199254740991) // 2^53-1 -) - -// Decoder decodes TOML data. -// -// TOML tables correspond to Go structs or maps; they can be used -// interchangeably, but structs offer better type safety. -// -// TOML table arrays correspond to either a slice of structs or a slice of maps. -// -// TOML datetimes correspond to [time.Time]. Local datetimes are parsed in the -// local timezone. -// -// [time.Duration] types are treated as nanoseconds if the TOML value is an -// integer, or they're parsed with time.ParseDuration() if they're strings. -// -// All other TOML types (float, string, int, bool and array) correspond to the -// obvious Go types. -// -// An exception to the above rules is if a type implements the TextUnmarshaler -// interface, in which case any primitive TOML value (floats, strings, integers, -// booleans, datetimes) will be converted to a []byte and given to the value's -// UnmarshalText method. See the Unmarshaler example for a demonstration with -// email addresses. -// -// # Key mapping -// -// TOML keys can map to either keys in a Go map or field names in a Go struct. -// The special `toml` struct tag can be used to map TOML keys to struct fields -// that don't match the key name exactly (see the example). A case insensitive -// match to struct names will be tried if an exact match can't be found. 
-// -// The mapping between TOML values and Go values is loose. That is, there may -// exist TOML values that cannot be placed into your representation, and there -// may be parts of your representation that do not correspond to TOML values. -// This loose mapping can be made stricter by using the IsDefined and/or -// Undecoded methods on the MetaData returned. -// -// This decoder does not handle cyclic types. Decode will not terminate if a -// cyclic type is passed. -type Decoder struct { - r io.Reader -} - -// NewDecoder creates a new Decoder. -func NewDecoder(r io.Reader) *Decoder { - return &Decoder{r: r} -} - -var ( - unmarshalToml = reflect.TypeOf((*Unmarshaler)(nil)).Elem() - unmarshalText = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() - primitiveType = reflect.TypeOf((*Primitive)(nil)).Elem() -) - -// Decode TOML data in to the pointer `v`. -func (dec *Decoder) Decode(v interface{}) (MetaData, error) { - rv := reflect.ValueOf(v) - if rv.Kind() != reflect.Ptr { - s := "%q" - if reflect.TypeOf(v) == nil { - s = "%v" - } - - return MetaData{}, fmt.Errorf("toml: cannot decode to non-pointer "+s, reflect.TypeOf(v)) - } - if rv.IsNil() { - return MetaData{}, fmt.Errorf("toml: cannot decode to nil value of %q", reflect.TypeOf(v)) - } - - // Check if this is a supported type: struct, map, interface{}, or something - // that implements UnmarshalTOML or UnmarshalText. - rv = indirect(rv) - rt := rv.Type() - if rv.Kind() != reflect.Struct && rv.Kind() != reflect.Map && - !(rv.Kind() == reflect.Interface && rv.NumMethod() == 0) && - !rt.Implements(unmarshalToml) && !rt.Implements(unmarshalText) { - return MetaData{}, fmt.Errorf("toml: cannot decode to type %s", rt) - } - - // TODO: parser should read from io.Reader? Or at the very least, make it - // read from []byte rather than string - data, err := ioutil.ReadAll(dec.r) - if err != nil { - return MetaData{}, err - } - - p, err := parse(string(data)) - if err != nil { - return MetaData{}, err - } - - md := MetaData{ - mapping: p.mapping, - keyInfo: p.keyInfo, - keys: p.ordered, - decoded: make(map[string]struct{}, len(p.ordered)), - context: nil, - data: data, - } - return md, md.unify(p.mapping, rv) -} - -// PrimitiveDecode is just like the other Decode* functions, except it decodes a -// TOML value that has already been parsed. Valid primitive values can *only* be -// obtained from values filled by the decoder functions, including this method. -// (i.e., v may contain more [Primitive] values.) -// -// Meta data for primitive values is included in the meta data returned by the -// Decode* functions with one exception: keys returned by the Undecoded method -// will only reflect keys that were decoded. Namely, any keys hidden behind a -// Primitive will be considered undecoded. Executing this method will update the -// undecoded keys in the meta data. (See the example.) -func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error { - md.context = primValue.context - defer func() { md.context = nil }() - return md.unify(primValue.undecoded, rvalue(v)) -} - -// unify performs a sort of type unification based on the structure of `rv`, -// which is the client representation. -// -// Any type mismatch produces an error. Finding a type that we don't know -// how to handle produces an unsupported type error. -func (md *MetaData) unify(data interface{}, rv reflect.Value) error { - // Special case. Look for a `Primitive` value. - // TODO: #76 would make this superfluous after implemented. 
- if rv.Type() == primitiveType { - // Save the undecoded data and the key context into the primitive - // value. - context := make(Key, len(md.context)) - copy(context, md.context) - rv.Set(reflect.ValueOf(Primitive{ - undecoded: data, - context: context, - })) - return nil - } - - rvi := rv.Interface() - if v, ok := rvi.(Unmarshaler); ok { - return v.UnmarshalTOML(data) - } - if v, ok := rvi.(encoding.TextUnmarshaler); ok { - return md.unifyText(data, v) - } - - // TODO: - // The behavior here is incorrect whenever a Go type satisfies the - // encoding.TextUnmarshaler interface but also corresponds to a TOML hash or - // array. In particular, the unmarshaler should only be applied to primitive - // TOML values. But at this point, it will be applied to all kinds of values - // and produce an incorrect error whenever those values are hashes or arrays - // (including arrays of tables). - - k := rv.Kind() - - if k >= reflect.Int && k <= reflect.Uint64 { - return md.unifyInt(data, rv) - } - switch k { - case reflect.Ptr: - elem := reflect.New(rv.Type().Elem()) - err := md.unify(data, reflect.Indirect(elem)) - if err != nil { - return err - } - rv.Set(elem) - return nil - case reflect.Struct: - return md.unifyStruct(data, rv) - case reflect.Map: - return md.unifyMap(data, rv) - case reflect.Array: - return md.unifyArray(data, rv) - case reflect.Slice: - return md.unifySlice(data, rv) - case reflect.String: - return md.unifyString(data, rv) - case reflect.Bool: - return md.unifyBool(data, rv) - case reflect.Interface: - if rv.NumMethod() > 0 { /// Only empty interfaces are supported. - return md.e("unsupported type %s", rv.Type()) - } - return md.unifyAnything(data, rv) - case reflect.Float32, reflect.Float64: - return md.unifyFloat64(data, rv) - } - return md.e("unsupported type %s", rv.Kind()) -} - -func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error { - tmap, ok := mapping.(map[string]interface{}) - if !ok { - if mapping == nil { - return nil - } - return md.e("type mismatch for %s: expected table but found %T", - rv.Type().String(), mapping) - } - - for key, datum := range tmap { - var f *field - fields := cachedTypeFields(rv.Type()) - for i := range fields { - ff := &fields[i] - if ff.name == key { - f = ff - break - } - if f == nil && strings.EqualFold(ff.name, key) { - f = ff - } - } - if f != nil { - subv := rv - for _, i := range f.index { - subv = indirect(subv.Field(i)) - } - - if isUnifiable(subv) { - md.decoded[md.context.add(key).String()] = struct{}{} - md.context = append(md.context, key) - - err := md.unify(datum, subv) - if err != nil { - return err - } - md.context = md.context[0 : len(md.context)-1] - } else if f.name != "" { - return md.e("cannot write unexported field %s.%s", rv.Type().String(), f.name) - } - } - } - return nil -} - -func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error { - keyType := rv.Type().Key().Kind() - if keyType != reflect.String && keyType != reflect.Interface { - return fmt.Errorf("toml: cannot decode to a map with non-string key type (%s in %q)", - keyType, rv.Type()) - } - - tmap, ok := mapping.(map[string]interface{}) - if !ok { - if tmap == nil { - return nil - } - return md.badtype("map", mapping) - } - if rv.IsNil() { - rv.Set(reflect.MakeMap(rv.Type())) - } - for k, v := range tmap { - md.decoded[md.context.add(k).String()] = struct{}{} - md.context = append(md.context, k) - - rvval := reflect.Indirect(reflect.New(rv.Type().Elem())) - - err := md.unify(v, indirect(rvval)) - if err != nil { - 
return err - } - md.context = md.context[0 : len(md.context)-1] - - rvkey := indirect(reflect.New(rv.Type().Key())) - - switch keyType { - case reflect.Interface: - rvkey.Set(reflect.ValueOf(k)) - case reflect.String: - rvkey.SetString(k) - } - - rv.SetMapIndex(rvkey, rvval) - } - return nil -} - -func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error { - datav := reflect.ValueOf(data) - if datav.Kind() != reflect.Slice { - if !datav.IsValid() { - return nil - } - return md.badtype("slice", data) - } - if l := datav.Len(); l != rv.Len() { - return md.e("expected array length %d; got TOML array of length %d", rv.Len(), l) - } - return md.unifySliceArray(datav, rv) -} - -func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error { - datav := reflect.ValueOf(data) - if datav.Kind() != reflect.Slice { - if !datav.IsValid() { - return nil - } - return md.badtype("slice", data) - } - n := datav.Len() - if rv.IsNil() || rv.Cap() < n { - rv.Set(reflect.MakeSlice(rv.Type(), n, n)) - } - rv.SetLen(n) - return md.unifySliceArray(datav, rv) -} - -func (md *MetaData) unifySliceArray(data, rv reflect.Value) error { - l := data.Len() - for i := 0; i < l; i++ { - err := md.unify(data.Index(i).Interface(), indirect(rv.Index(i))) - if err != nil { - return err - } - } - return nil -} - -func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error { - _, ok := rv.Interface().(json.Number) - if ok { - if i, ok := data.(int64); ok { - rv.SetString(strconv.FormatInt(i, 10)) - } else if f, ok := data.(float64); ok { - rv.SetString(strconv.FormatFloat(f, 'f', -1, 64)) - } else { - return md.badtype("string", data) - } - return nil - } - - if s, ok := data.(string); ok { - rv.SetString(s) - return nil - } - return md.badtype("string", data) -} - -func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error { - rvk := rv.Kind() - - if num, ok := data.(float64); ok { - switch rvk { - case reflect.Float32: - if num < -math.MaxFloat32 || num > math.MaxFloat32 { - return md.parseErr(errParseRange{i: num, size: rvk.String()}) - } - fallthrough - case reflect.Float64: - rv.SetFloat(num) - default: - panic("bug") - } - return nil - } - - if num, ok := data.(int64); ok { - if (rvk == reflect.Float32 && (num < -maxSafeFloat32Int || num > maxSafeFloat32Int)) || - (rvk == reflect.Float64 && (num < -maxSafeFloat64Int || num > maxSafeFloat64Int)) { - return md.parseErr(errParseRange{i: num, size: rvk.String()}) - } - rv.SetFloat(float64(num)) - return nil - } - - return md.badtype("float", data) -} - -func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error { - _, ok := rv.Interface().(time.Duration) - if ok { - // Parse as string duration, and fall back to regular integer parsing - // (as nanosecond) if this is not a string. 
- if s, ok := data.(string); ok { - dur, err := time.ParseDuration(s) - if err != nil { - return md.parseErr(errParseDuration{s}) - } - rv.SetInt(int64(dur)) - return nil - } - } - - num, ok := data.(int64) - if !ok { - return md.badtype("integer", data) - } - - rvk := rv.Kind() - switch { - case rvk >= reflect.Int && rvk <= reflect.Int64: - if (rvk == reflect.Int8 && (num < math.MinInt8 || num > math.MaxInt8)) || - (rvk == reflect.Int16 && (num < math.MinInt16 || num > math.MaxInt16)) || - (rvk == reflect.Int32 && (num < math.MinInt32 || num > math.MaxInt32)) { - return md.parseErr(errParseRange{i: num, size: rvk.String()}) - } - rv.SetInt(num) - case rvk >= reflect.Uint && rvk <= reflect.Uint64: - unum := uint64(num) - if rvk == reflect.Uint8 && (num < 0 || unum > math.MaxUint8) || - rvk == reflect.Uint16 && (num < 0 || unum > math.MaxUint16) || - rvk == reflect.Uint32 && (num < 0 || unum > math.MaxUint32) { - return md.parseErr(errParseRange{i: num, size: rvk.String()}) - } - rv.SetUint(unum) - default: - panic("unreachable") - } - return nil -} - -func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error { - if b, ok := data.(bool); ok { - rv.SetBool(b) - return nil - } - return md.badtype("boolean", data) -} - -func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error { - rv.Set(reflect.ValueOf(data)) - return nil -} - -func (md *MetaData) unifyText(data interface{}, v encoding.TextUnmarshaler) error { - var s string - switch sdata := data.(type) { - case Marshaler: - text, err := sdata.MarshalTOML() - if err != nil { - return err - } - s = string(text) - case encoding.TextMarshaler: - text, err := sdata.MarshalText() - if err != nil { - return err - } - s = string(text) - case fmt.Stringer: - s = sdata.String() - case string: - s = sdata - case bool: - s = fmt.Sprintf("%v", sdata) - case int64: - s = fmt.Sprintf("%d", sdata) - case float64: - s = fmt.Sprintf("%f", sdata) - default: - return md.badtype("primitive (string-like)", data) - } - if err := v.UnmarshalText([]byte(s)); err != nil { - return err - } - return nil -} - -func (md *MetaData) badtype(dst string, data interface{}) error { - return md.e("incompatible types: TOML value has type %T; destination has type %s", data, dst) -} - -func (md *MetaData) parseErr(err error) error { - k := md.context.String() - return ParseError{ - LastKey: k, - Position: md.keyInfo[k].pos, - Line: md.keyInfo[k].pos.Line, - err: err, - input: string(md.data), - } -} - -func (md *MetaData) e(format string, args ...interface{}) error { - f := "toml: " - if len(md.context) > 0 { - f = fmt.Sprintf("toml: (last key %q): ", md.context) - p := md.keyInfo[md.context.String()].pos - if p.Line > 0 { - f = fmt.Sprintf("toml: line %d (last key %q): ", p.Line, md.context) - } - } - return fmt.Errorf(f+format, args...) -} - -// rvalue returns a reflect.Value of `v`. All pointers are resolved. -func rvalue(v interface{}) reflect.Value { - return indirect(reflect.ValueOf(v)) -} - -// indirect returns the value pointed to by a pointer. -// -// Pointers are followed until the value is not a pointer. New values are -// allocated for each nil pointer. -// -// An exception to this rule is if the value satisfies an interface of interest -// to us (like encoding.TextUnmarshaler). 
-func indirect(v reflect.Value) reflect.Value { - if v.Kind() != reflect.Ptr { - if v.CanSet() { - pv := v.Addr() - pvi := pv.Interface() - if _, ok := pvi.(encoding.TextUnmarshaler); ok { - return pv - } - if _, ok := pvi.(Unmarshaler); ok { - return pv - } - } - return v - } - if v.IsNil() { - v.Set(reflect.New(v.Type().Elem())) - } - return indirect(reflect.Indirect(v)) -} - -func isUnifiable(rv reflect.Value) bool { - if rv.CanSet() { - return true - } - rvi := rv.Interface() - if _, ok := rvi.(encoding.TextUnmarshaler); ok { - return true - } - if _, ok := rvi.(Unmarshaler); ok { - return true - } - return false -} diff --git a/vendor/github.com/BurntSushi/toml/decode_go116.go b/vendor/github.com/BurntSushi/toml/decode_go116.go deleted file mode 100644 index 086d0b6866..0000000000 --- a/vendor/github.com/BurntSushi/toml/decode_go116.go +++ /dev/null @@ -1,19 +0,0 @@ -//go:build go1.16 -// +build go1.16 - -package toml - -import ( - "io/fs" -) - -// DecodeFS reads the contents of a file from [fs.FS] and decodes it with -// [Decode]. -func DecodeFS(fsys fs.FS, path string, v interface{}) (MetaData, error) { - fp, err := fsys.Open(path) - if err != nil { - return MetaData{}, err - } - defer fp.Close() - return NewDecoder(fp).Decode(v) -} diff --git a/vendor/github.com/BurntSushi/toml/deprecated.go b/vendor/github.com/BurntSushi/toml/deprecated.go deleted file mode 100644 index b9e309717e..0000000000 --- a/vendor/github.com/BurntSushi/toml/deprecated.go +++ /dev/null @@ -1,29 +0,0 @@ -package toml - -import ( - "encoding" - "io" -) - -// TextMarshaler is an alias for encoding.TextMarshaler. -// -// Deprecated: use encoding.TextMarshaler -type TextMarshaler encoding.TextMarshaler - -// TextUnmarshaler is an alias for encoding.TextUnmarshaler. -// -// Deprecated: use encoding.TextUnmarshaler -type TextUnmarshaler encoding.TextUnmarshaler - -// PrimitiveDecode is an alias for MetaData.PrimitiveDecode(). -// -// Deprecated: use MetaData.PrimitiveDecode. -func PrimitiveDecode(primValue Primitive, v interface{}) error { - md := MetaData{decoded: make(map[string]struct{})} - return md.unify(primValue.undecoded, rvalue(v)) -} - -// DecodeReader is an alias for NewDecoder(r).Decode(v). -// -// Deprecated: use NewDecoder(reader).Decode(&value). -func DecodeReader(r io.Reader, v interface{}) (MetaData, error) { return NewDecoder(r).Decode(v) } diff --git a/vendor/github.com/BurntSushi/toml/doc.go b/vendor/github.com/BurntSushi/toml/doc.go deleted file mode 100644 index 81a7c0fe9f..0000000000 --- a/vendor/github.com/BurntSushi/toml/doc.go +++ /dev/null @@ -1,11 +0,0 @@ -// Package toml implements decoding and encoding of TOML files. -// -// This package supports TOML v1.0.0, as specified at https://toml.io -// -// There is also support for delaying decoding with the Primitive type, and -// querying the set of keys in a TOML document with the MetaData type. -// -// The github.com/BurntSushi/toml/cmd/tomlv package implements a TOML validator, -// and can be used to verify if TOML document is valid. It can also be used to -// print the type of each key. 
-package toml diff --git a/vendor/github.com/BurntSushi/toml/encode.go b/vendor/github.com/BurntSushi/toml/encode.go deleted file mode 100644 index 9cd25d7571..0000000000 --- a/vendor/github.com/BurntSushi/toml/encode.go +++ /dev/null @@ -1,759 +0,0 @@ -package toml - -import ( - "bufio" - "encoding" - "encoding/json" - "errors" - "fmt" - "io" - "math" - "reflect" - "sort" - "strconv" - "strings" - "time" - - "github.com/BurntSushi/toml/internal" -) - -type tomlEncodeError struct{ error } - -var ( - errArrayNilElement = errors.New("toml: cannot encode array with nil element") - errNonString = errors.New("toml: cannot encode a map with non-string key type") - errNoKey = errors.New("toml: top-level values must be Go maps or structs") - errAnything = errors.New("") // used in testing -) - -var dblQuotedReplacer = strings.NewReplacer( - "\"", "\\\"", - "\\", "\\\\", - "\x00", `\u0000`, - "\x01", `\u0001`, - "\x02", `\u0002`, - "\x03", `\u0003`, - "\x04", `\u0004`, - "\x05", `\u0005`, - "\x06", `\u0006`, - "\x07", `\u0007`, - "\b", `\b`, - "\t", `\t`, - "\n", `\n`, - "\x0b", `\u000b`, - "\f", `\f`, - "\r", `\r`, - "\x0e", `\u000e`, - "\x0f", `\u000f`, - "\x10", `\u0010`, - "\x11", `\u0011`, - "\x12", `\u0012`, - "\x13", `\u0013`, - "\x14", `\u0014`, - "\x15", `\u0015`, - "\x16", `\u0016`, - "\x17", `\u0017`, - "\x18", `\u0018`, - "\x19", `\u0019`, - "\x1a", `\u001a`, - "\x1b", `\u001b`, - "\x1c", `\u001c`, - "\x1d", `\u001d`, - "\x1e", `\u001e`, - "\x1f", `\u001f`, - "\x7f", `\u007f`, -) - -var ( - marshalToml = reflect.TypeOf((*Marshaler)(nil)).Elem() - marshalText = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() - timeType = reflect.TypeOf((*time.Time)(nil)).Elem() -) - -// Marshaler is the interface implemented by types that can marshal themselves -// into valid TOML. -type Marshaler interface { - MarshalTOML() ([]byte, error) -} - -// Encoder encodes a Go to a TOML document. -// -// The mapping between Go values and TOML values should be precisely the same as -// for [Decode]. -// -// time.Time is encoded as a RFC 3339 string, and time.Duration as its string -// representation. -// -// The [Marshaler] and [encoding.TextMarshaler] interfaces are supported to -// encoding the value as custom TOML. -// -// If you want to write arbitrary binary data then you will need to use -// something like base64 since TOML does not have any binary types. -// -// When encoding TOML hashes (Go maps or structs), keys without any sub-hashes -// are encoded first. -// -// Go maps will be sorted alphabetically by key for deterministic output. -// -// The toml struct tag can be used to provide the key name; if omitted the -// struct field name will be used. If the "omitempty" option is present the -// following value will be skipped: -// -// - arrays, slices, maps, and string with len of 0 -// - struct with all zero values -// - bool false -// -// If omitzero is given all int and float types with a value of 0 will be -// skipped. -// -// Encoding Go values without a corresponding TOML representation will return an -// error. Examples of this includes maps with non-string keys, slices with nil -// elements, embedded non-struct types, and nested slices containing maps or -// structs. (e.g. [][]map[string]string is not allowed but []map[string]string -// is okay, as is []map[string][]string). -// -// NOTE: only exported keys are encoded due to the use of reflection. Unexported -// keys are silently discarded. -type Encoder struct { - // String to use for a single indentation level; default is two spaces. 
- Indent string - - w *bufio.Writer - hasWritten bool // written any output to w yet? -} - -// NewEncoder create a new Encoder. -func NewEncoder(w io.Writer) *Encoder { - return &Encoder{ - w: bufio.NewWriter(w), - Indent: " ", - } -} - -// Encode writes a TOML representation of the Go value to the [Encoder]'s writer. -// -// An error is returned if the value given cannot be encoded to a valid TOML -// document. -func (enc *Encoder) Encode(v interface{}) error { - rv := eindirect(reflect.ValueOf(v)) - err := enc.safeEncode(Key([]string{}), rv) - if err != nil { - return err - } - return enc.w.Flush() -} - -func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) { - defer func() { - if r := recover(); r != nil { - if terr, ok := r.(tomlEncodeError); ok { - err = terr.error - return - } - panic(r) - } - }() - enc.encode(key, rv) - return nil -} - -func (enc *Encoder) encode(key Key, rv reflect.Value) { - // If we can marshal the type to text, then we use that. This prevents the - // encoder for handling these types as generic structs (or whatever the - // underlying type of a TextMarshaler is). - switch { - case isMarshaler(rv): - enc.writeKeyValue(key, rv, false) - return - case rv.Type() == primitiveType: // TODO: #76 would make this superfluous after implemented. - enc.encode(key, reflect.ValueOf(rv.Interface().(Primitive).undecoded)) - return - } - - k := rv.Kind() - switch k { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, - reflect.Int64, - reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, - reflect.Uint64, - reflect.Float32, reflect.Float64, reflect.String, reflect.Bool: - enc.writeKeyValue(key, rv, false) - case reflect.Array, reflect.Slice: - if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) { - enc.eArrayOfTables(key, rv) - } else { - enc.writeKeyValue(key, rv, false) - } - case reflect.Interface: - if rv.IsNil() { - return - } - enc.encode(key, rv.Elem()) - case reflect.Map: - if rv.IsNil() { - return - } - enc.eTable(key, rv) - case reflect.Ptr: - if rv.IsNil() { - return - } - enc.encode(key, rv.Elem()) - case reflect.Struct: - enc.eTable(key, rv) - default: - encPanic(fmt.Errorf("unsupported type for key '%s': %s", key, k)) - } -} - -// eElement encodes any value that can be an array element. -func (enc *Encoder) eElement(rv reflect.Value) { - switch v := rv.Interface().(type) { - case time.Time: // Using TextMarshaler adds extra quotes, which we don't want. - format := time.RFC3339Nano - switch v.Location() { - case internal.LocalDatetime: - format = "2006-01-02T15:04:05.999999999" - case internal.LocalDate: - format = "2006-01-02" - case internal.LocalTime: - format = "15:04:05.999999999" - } - switch v.Location() { - default: - enc.wf(v.Format(format)) - case internal.LocalDatetime, internal.LocalDate, internal.LocalTime: - enc.wf(v.In(time.UTC).Format(format)) - } - return - case Marshaler: - s, err := v.MarshalTOML() - if err != nil { - encPanic(err) - } - if s == nil { - encPanic(errors.New("MarshalTOML returned nil and no error")) - } - enc.w.Write(s) - return - case encoding.TextMarshaler: - s, err := v.MarshalText() - if err != nil { - encPanic(err) - } - if s == nil { - encPanic(errors.New("MarshalText returned nil and no error")) - } - enc.writeQuoted(string(s)) - return - case time.Duration: - enc.writeQuoted(v.String()) - return - case json.Number: - n, _ := rv.Interface().(json.Number) - - if n == "" { /// Useful zero value. 
- enc.w.WriteByte('0') - return - } else if v, err := n.Int64(); err == nil { - enc.eElement(reflect.ValueOf(v)) - return - } else if v, err := n.Float64(); err == nil { - enc.eElement(reflect.ValueOf(v)) - return - } - encPanic(fmt.Errorf("unable to convert %q to int64 or float64", n)) - } - - switch rv.Kind() { - case reflect.Ptr: - enc.eElement(rv.Elem()) - return - case reflect.String: - enc.writeQuoted(rv.String()) - case reflect.Bool: - enc.wf(strconv.FormatBool(rv.Bool())) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - enc.wf(strconv.FormatInt(rv.Int(), 10)) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - enc.wf(strconv.FormatUint(rv.Uint(), 10)) - case reflect.Float32: - f := rv.Float() - if math.IsNaN(f) { - enc.wf("nan") - } else if math.IsInf(f, 0) { - enc.wf("%cinf", map[bool]byte{true: '-', false: '+'}[math.Signbit(f)]) - } else { - enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 32))) - } - case reflect.Float64: - f := rv.Float() - if math.IsNaN(f) { - enc.wf("nan") - } else if math.IsInf(f, 0) { - enc.wf("%cinf", map[bool]byte{true: '-', false: '+'}[math.Signbit(f)]) - } else { - enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 64))) - } - case reflect.Array, reflect.Slice: - enc.eArrayOrSliceElement(rv) - case reflect.Struct: - enc.eStruct(nil, rv, true) - case reflect.Map: - enc.eMap(nil, rv, true) - case reflect.Interface: - enc.eElement(rv.Elem()) - default: - encPanic(fmt.Errorf("unexpected type: %T", rv.Interface())) - } -} - -// By the TOML spec, all floats must have a decimal with at least one number on -// either side. -func floatAddDecimal(fstr string) string { - if !strings.Contains(fstr, ".") { - return fstr + ".0" - } - return fstr -} - -func (enc *Encoder) writeQuoted(s string) { - enc.wf("\"%s\"", dblQuotedReplacer.Replace(s)) -} - -func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) { - length := rv.Len() - enc.wf("[") - for i := 0; i < length; i++ { - elem := eindirect(rv.Index(i)) - enc.eElement(elem) - if i != length-1 { - enc.wf(", ") - } - } - enc.wf("]") -} - -func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) { - if len(key) == 0 { - encPanic(errNoKey) - } - for i := 0; i < rv.Len(); i++ { - trv := eindirect(rv.Index(i)) - if isNil(trv) { - continue - } - enc.newline() - enc.wf("%s[[%s]]", enc.indentStr(key), key) - enc.newline() - enc.eMapOrStruct(key, trv, false) - } -} - -func (enc *Encoder) eTable(key Key, rv reflect.Value) { - if len(key) == 1 { - // Output an extra newline between top-level tables. - // (The newline isn't written if nothing else has been written though.) - enc.newline() - } - if len(key) > 0 { - enc.wf("%s[%s]", enc.indentStr(key), key) - enc.newline() - } - enc.eMapOrStruct(key, rv, false) -} - -func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value, inline bool) { - switch rv.Kind() { - case reflect.Map: - enc.eMap(key, rv, inline) - case reflect.Struct: - enc.eStruct(key, rv, inline) - default: - // Should never happen? - panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String()) - } -} - -func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) { - rt := rv.Type() - if rt.Key().Kind() != reflect.String { - encPanic(errNonString) - } - - // Sort keys so that we have deterministic output. And write keys directly - // underneath this key first, before writing sub-structs or sub-maps. 
- var mapKeysDirect, mapKeysSub []string - for _, mapKey := range rv.MapKeys() { - k := mapKey.String() - if typeIsTable(tomlTypeOfGo(eindirect(rv.MapIndex(mapKey)))) { - mapKeysSub = append(mapKeysSub, k) - } else { - mapKeysDirect = append(mapKeysDirect, k) - } - } - - var writeMapKeys = func(mapKeys []string, trailC bool) { - sort.Strings(mapKeys) - for i, mapKey := range mapKeys { - val := eindirect(rv.MapIndex(reflect.ValueOf(mapKey))) - if isNil(val) { - continue - } - - if inline { - enc.writeKeyValue(Key{mapKey}, val, true) - if trailC || i != len(mapKeys)-1 { - enc.wf(", ") - } - } else { - enc.encode(key.add(mapKey), val) - } - } - } - - if inline { - enc.wf("{") - } - writeMapKeys(mapKeysDirect, len(mapKeysSub) > 0) - writeMapKeys(mapKeysSub, false) - if inline { - enc.wf("}") - } -} - -const is32Bit = (32 << (^uint(0) >> 63)) == 32 - -func pointerTo(t reflect.Type) reflect.Type { - if t.Kind() == reflect.Ptr { - return pointerTo(t.Elem()) - } - return t -} - -func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) { - // Write keys for fields directly under this key first, because if we write - // a field that creates a new table then all keys under it will be in that - // table (not the one we're writing here). - // - // Fields is a [][]int: for fieldsDirect this always has one entry (the - // struct index). For fieldsSub it contains two entries: the parent field - // index from tv, and the field indexes for the fields of the sub. - var ( - rt = rv.Type() - fieldsDirect, fieldsSub [][]int - addFields func(rt reflect.Type, rv reflect.Value, start []int) - ) - addFields = func(rt reflect.Type, rv reflect.Value, start []int) { - for i := 0; i < rt.NumField(); i++ { - f := rt.Field(i) - isEmbed := f.Anonymous && pointerTo(f.Type).Kind() == reflect.Struct - if f.PkgPath != "" && !isEmbed { /// Skip unexported fields. - continue - } - opts := getOptions(f.Tag) - if opts.skip { - continue - } - - frv := eindirect(rv.Field(i)) - - if is32Bit { - // Copy so it works correct on 32bit archs; not clear why this - // is needed. See #314, and https://www.reddit.com/r/golang/comments/pnx8v4 - // This also works fine on 64bit, but 32bit archs are somewhat - // rare and this is a wee bit faster. - copyStart := make([]int, len(start)) - copy(copyStart, start) - start = copyStart - } - - // Treat anonymous struct fields with tag names as though they are - // not anonymous, like encoding/json does. - // - // Non-struct anonymous fields use the normal encoding logic. - if isEmbed { - if getOptions(f.Tag).name == "" && frv.Kind() == reflect.Struct { - addFields(frv.Type(), frv, append(start, f.Index...)) - continue - } - } - - if typeIsTable(tomlTypeOfGo(frv)) { - fieldsSub = append(fieldsSub, append(start, f.Index...)) - } else { - fieldsDirect = append(fieldsDirect, append(start, f.Index...)) - } - } - } - addFields(rt, rv, nil) - - writeFields := func(fields [][]int) { - for _, fieldIndex := range fields { - fieldType := rt.FieldByIndex(fieldIndex) - fieldVal := rv.FieldByIndex(fieldIndex) - - opts := getOptions(fieldType.Tag) - if opts.skip { - continue - } - if opts.omitempty && isEmpty(fieldVal) { - continue - } - - fieldVal = eindirect(fieldVal) - - if isNil(fieldVal) { /// Don't write anything for nil fields. 
- continue - } - - keyName := fieldType.Name - if opts.name != "" { - keyName = opts.name - } - - if opts.omitzero && isZero(fieldVal) { - continue - } - - if inline { - enc.writeKeyValue(Key{keyName}, fieldVal, true) - if fieldIndex[0] != len(fields)-1 { - enc.wf(", ") - } - } else { - enc.encode(key.add(keyName), fieldVal) - } - } - } - - if inline { - enc.wf("{") - } - writeFields(fieldsDirect) - writeFields(fieldsSub) - if inline { - enc.wf("}") - } -} - -// tomlTypeOfGo returns the TOML type name of the Go value's type. -// -// It is used to determine whether the types of array elements are mixed (which -// is forbidden). If the Go value is nil, then it is illegal for it to be an -// array element, and valueIsNil is returned as true. -// -// The type may be `nil`, which means no concrete TOML type could be found. -func tomlTypeOfGo(rv reflect.Value) tomlType { - if isNil(rv) || !rv.IsValid() { - return nil - } - - if rv.Kind() == reflect.Struct { - if rv.Type() == timeType { - return tomlDatetime - } - if isMarshaler(rv) { - return tomlString - } - return tomlHash - } - - if isMarshaler(rv) { - return tomlString - } - - switch rv.Kind() { - case reflect.Bool: - return tomlBool - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, - reflect.Int64, - reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, - reflect.Uint64: - return tomlInteger - case reflect.Float32, reflect.Float64: - return tomlFloat - case reflect.Array, reflect.Slice: - if isTableArray(rv) { - return tomlArrayHash - } - return tomlArray - case reflect.Ptr, reflect.Interface: - return tomlTypeOfGo(rv.Elem()) - case reflect.String: - return tomlString - case reflect.Map: - return tomlHash - default: - encPanic(errors.New("unsupported type: " + rv.Kind().String())) - panic("unreachable") - } -} - -func isMarshaler(rv reflect.Value) bool { - return rv.Type().Implements(marshalText) || rv.Type().Implements(marshalToml) -} - -// isTableArray reports if all entries in the array or slice are a table. -func isTableArray(arr reflect.Value) bool { - if isNil(arr) || !arr.IsValid() || arr.Len() == 0 { - return false - } - - ret := true - for i := 0; i < arr.Len(); i++ { - tt := tomlTypeOfGo(eindirect(arr.Index(i))) - // Don't allow nil. 
-		if tt == nil {
-			encPanic(errArrayNilElement)
-		}
-
-		if ret && !typeEqual(tomlHash, tt) {
-			ret = false
-		}
-	}
-	return ret
-}
-
-type tagOptions struct {
-	skip      bool // "-"
-	name      string
-	omitempty bool
-	omitzero  bool
-}
-
-func getOptions(tag reflect.StructTag) tagOptions {
-	t := tag.Get("toml")
-	if t == "-" {
-		return tagOptions{skip: true}
-	}
-	var opts tagOptions
-	parts := strings.Split(t, ",")
-	opts.name = parts[0]
-	for _, s := range parts[1:] {
-		switch s {
-		case "omitempty":
-			opts.omitempty = true
-		case "omitzero":
-			opts.omitzero = true
-		}
-	}
-	return opts
-}
-
-func isZero(rv reflect.Value) bool {
-	switch rv.Kind() {
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-		return rv.Int() == 0
-	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
-		return rv.Uint() == 0
-	case reflect.Float32, reflect.Float64:
-		return rv.Float() == 0.0
-	}
-	return false
-}
-
-func isEmpty(rv reflect.Value) bool {
-	switch rv.Kind() {
-	case reflect.Array, reflect.Slice, reflect.Map, reflect.String:
-		return rv.Len() == 0
-	case reflect.Struct:
-		if rv.Type().Comparable() {
-			return reflect.Zero(rv.Type()).Interface() == rv.Interface()
-		}
-		// Need to also check if all the fields are empty, otherwise something
-		// like this with uncomparable types will always return true:
-		//
-		//   type a struct{ field b }
-		//   type b struct{ s []string }
-		//   s := a{field: b{s: []string{"AAA"}}}
-		for i := 0; i < rv.NumField(); i++ {
-			if !isEmpty(rv.Field(i)) {
-				return false
-			}
-		}
-		return true
-	case reflect.Bool:
-		return !rv.Bool()
-	case reflect.Ptr:
-		return rv.IsNil()
-	}
-	return false
-}
-
-func (enc *Encoder) newline() {
-	if enc.hasWritten {
-		enc.wf("\n")
-	}
-}
-
-// Write a key/value pair:
-//
-//	key = <any value>
-//
-// This is also used for "k = v" in inline tables; so something like this will
-// be written in three calls:
-//
-//	┌───────────────────┐
-//	│      ┌───┐  ┌────┐│
-//	v      v   v  v    vv
-//	key = {k = 1, k2 = 2}
-func (enc *Encoder) writeKeyValue(key Key, val reflect.Value, inline bool) {
-	/// Marshaler used on top-level document; call eElement() to just call
-	/// Marshal{TOML,Text}.
-	if len(key) == 0 {
-		enc.eElement(val)
-		return
-	}
-	enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1))
-	enc.eElement(val)
-	if !inline {
-		enc.newline()
-	}
-}
-
-func (enc *Encoder) wf(format string, v ...interface{}) {
-	_, err := fmt.Fprintf(enc.w, format, v...)
-	if err != nil {
-		encPanic(err)
-	}
-	enc.hasWritten = true
-}
-
-func (enc *Encoder) indentStr(key Key) string {
-	return strings.Repeat(enc.Indent, len(key)-1)
-}
-
-func encPanic(err error) {
-	panic(tomlEncodeError{err})
-}
-
-// Resolve any level of pointers to the actual value (e.g. **string → string).
-func eindirect(v reflect.Value) reflect.Value {
-	if v.Kind() != reflect.Ptr && v.Kind() != reflect.Interface {
-		if isMarshaler(v) {
-			return v
-		}
-		if v.CanAddr() { /// Special case for marshalers; see #358.
- if pv := v.Addr(); isMarshaler(pv) { - return pv - } - } - return v - } - - if v.IsNil() { - return v - } - - return eindirect(v.Elem()) -} - -func isNil(rv reflect.Value) bool { - switch rv.Kind() { - case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: - return rv.IsNil() - default: - return false - } -} diff --git a/vendor/github.com/BurntSushi/toml/error.go b/vendor/github.com/BurntSushi/toml/error.go deleted file mode 100644 index efd68865bb..0000000000 --- a/vendor/github.com/BurntSushi/toml/error.go +++ /dev/null @@ -1,279 +0,0 @@ -package toml - -import ( - "fmt" - "strings" -) - -// ParseError is returned when there is an error parsing the TOML syntax such as -// invalid syntax, duplicate keys, etc. -// -// In addition to the error message itself, you can also print detailed location -// information with context by using [ErrorWithPosition]: -// -// toml: error: Key 'fruit' was already created and cannot be used as an array. -// -// At line 4, column 2-7: -// -// 2 | fruit = [] -// 3 | -// 4 | [[fruit]] # Not allowed -// ^^^^^ -// -// [ErrorWithUsage] can be used to print the above with some more detailed usage -// guidance: -// -// toml: error: newlines not allowed within inline tables -// -// At line 1, column 18: -// -// 1 | x = [{ key = 42 # -// ^ -// -// Error help: -// -// Inline tables must always be on a single line: -// -// table = {key = 42, second = 43} -// -// It is invalid to split them over multiple lines like so: -// -// # INVALID -// table = { -// key = 42, -// second = 43 -// } -// -// Use regular for this: -// -// [table] -// key = 42 -// second = 43 -type ParseError struct { - Message string // Short technical message. - Usage string // Longer message with usage guidance; may be blank. - Position Position // Position of the error - LastKey string // Last parsed key, may be blank. - - // Line the error occurred. - // - // Deprecated: use [Position]. - Line int - - err error - input string -} - -// Position of an error. -type Position struct { - Line int // Line number, starting at 1. - Start int // Start of error, as byte offset starting at 0. - Len int // Lenght in bytes. -} - -func (pe ParseError) Error() string { - msg := pe.Message - if msg == "" { // Error from errorf() - msg = pe.err.Error() - } - - if pe.LastKey == "" { - return fmt.Sprintf("toml: line %d: %s", pe.Position.Line, msg) - } - return fmt.Sprintf("toml: line %d (last key %q): %s", - pe.Position.Line, pe.LastKey, msg) -} - -// ErrorWithPosition returns the error with detailed location context. -// -// See the documentation on [ParseError]. -func (pe ParseError) ErrorWithPosition() string { - if pe.input == "" { // Should never happen, but just in case. - return pe.Error() - } - - var ( - lines = strings.Split(pe.input, "\n") - col = pe.column(lines) - b = new(strings.Builder) - ) - - msg := pe.Message - if msg == "" { - msg = pe.err.Error() - } - - // TODO: don't show control characters as literals? This may not show up - // well everywhere. 
- - if pe.Position.Len == 1 { - fmt.Fprintf(b, "toml: error: %s\n\nAt line %d, column %d:\n\n", - msg, pe.Position.Line, col+1) - } else { - fmt.Fprintf(b, "toml: error: %s\n\nAt line %d, column %d-%d:\n\n", - msg, pe.Position.Line, col, col+pe.Position.Len) - } - if pe.Position.Line > 2 { - fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-2, lines[pe.Position.Line-3]) - } - if pe.Position.Line > 1 { - fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-1, lines[pe.Position.Line-2]) - } - fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line, lines[pe.Position.Line-1]) - fmt.Fprintf(b, "% 10s%s%s\n", "", strings.Repeat(" ", col), strings.Repeat("^", pe.Position.Len)) - return b.String() -} - -// ErrorWithUsage returns the error with detailed location context and usage -// guidance. -// -// See the documentation on [ParseError]. -func (pe ParseError) ErrorWithUsage() string { - m := pe.ErrorWithPosition() - if u, ok := pe.err.(interface{ Usage() string }); ok && u.Usage() != "" { - lines := strings.Split(strings.TrimSpace(u.Usage()), "\n") - for i := range lines { - if lines[i] != "" { - lines[i] = " " + lines[i] - } - } - return m + "Error help:\n\n" + strings.Join(lines, "\n") + "\n" - } - return m -} - -func (pe ParseError) column(lines []string) int { - var pos, col int - for i := range lines { - ll := len(lines[i]) + 1 // +1 for the removed newline - if pos+ll >= pe.Position.Start { - col = pe.Position.Start - pos - if col < 0 { // Should never happen, but just in case. - col = 0 - } - break - } - pos += ll - } - - return col -} - -type ( - errLexControl struct{ r rune } - errLexEscape struct{ r rune } - errLexUTF8 struct{ b byte } - errLexInvalidNum struct{ v string } - errLexInvalidDate struct{ v string } - errLexInlineTableNL struct{} - errLexStringNL struct{} - errParseRange struct { - i interface{} // int or float - size string // "int64", "uint16", etc. - } - errParseDuration struct{ d string } -) - -func (e errLexControl) Error() string { - return fmt.Sprintf("TOML files cannot contain control characters: '0x%02x'", e.r) -} -func (e errLexControl) Usage() string { return "" } - -func (e errLexEscape) Error() string { return fmt.Sprintf(`invalid escape in string '\%c'`, e.r) } -func (e errLexEscape) Usage() string { return usageEscape } -func (e errLexUTF8) Error() string { return fmt.Sprintf("invalid UTF-8 byte: 0x%02x", e.b) } -func (e errLexUTF8) Usage() string { return "" } -func (e errLexInvalidNum) Error() string { return fmt.Sprintf("invalid number: %q", e.v) } -func (e errLexInvalidNum) Usage() string { return "" } -func (e errLexInvalidDate) Error() string { return fmt.Sprintf("invalid date: %q", e.v) } -func (e errLexInvalidDate) Usage() string { return "" } -func (e errLexInlineTableNL) Error() string { return "newlines not allowed within inline tables" } -func (e errLexInlineTableNL) Usage() string { return usageInlineNewline } -func (e errLexStringNL) Error() string { return "strings cannot contain newlines" } -func (e errLexStringNL) Usage() string { return usageStringNewline } -func (e errParseRange) Error() string { return fmt.Sprintf("%v is out of range for %s", e.i, e.size) } -func (e errParseRange) Usage() string { return usageIntOverflow } -func (e errParseDuration) Error() string { return fmt.Sprintf("invalid duration: %q", e.d) } -func (e errParseDuration) Usage() string { return usageDuration } - -const usageEscape = ` -A '\' inside a "-delimited string is interpreted as an escape character. 
-
-The following escape sequences are supported:
-\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX
-
-To prevent a '\' from being recognized as an escape character, use either:
-
-- a ' or '''-delimited string; escape characters aren't processed in them; or
-- write two backslashes to get a single backslash: '\\'.
-
-If you're trying to add a Windows path (e.g. "C:\Users\martin") then using '/'
-instead of '\' will usually also work: "C:/Users/martin".
-`
-
-const usageInlineNewline = `
-Inline tables must always be on a single line:
-
-    table = {key = 42, second = 43}
-
-It is invalid to split them over multiple lines like so:
-
-    # INVALID
-    table = {
-        key    = 42,
-        second = 43
-    }
-
-Use regular tables for this:
-
-    [table]
-    key    = 42
-    second = 43
-`
-
-const usageStringNewline = `
-Strings must always be on a single line, and cannot span more than one line:
-
-    # INVALID
-    string = "Hello,
-    world!"
-
-Instead use """ or ''' to split strings over multiple lines:
-
-    string = """Hello,
-    world!"""
-`
-
-const usageIntOverflow = `
-This number is too large; this may be an error in the TOML, but it can also be a
-bug in the program that uses an integer type that is too small.
-
-The maximum and minimum values are:
-
-    size   │ lowest         │ highest
-    ───────┼────────────────┼──────────────
-    int8   │ -128           │ 127
-    int16  │ -32,768        │ 32,767
-    int32  │ -2,147,483,648 │ 2,147,483,647
-    int64  │ -9.2 × 10¹⁷    │ 9.2 × 10¹⁷
-    uint8  │ 0              │ 255
-    uint16 │ 0              │ 65,535
-    uint32 │ 0              │ 4,294,967,295
-    uint64 │ 0              │ 1.8 × 10¹⁸
-
-int refers to int32 on 32-bit systems and int64 on 64-bit systems.
-`
-
-const usageDuration = `
-A duration must be as "number<unit>[<number><unit> ...]", without any spaces.
-Valid units are:
-
-    ns         nanoseconds (billionth of a second)
-    us, µs     microseconds (millionth of a second)
-    ms         milliseconds (thousandth of a second)
-    s          seconds
-    m          minutes
-    h          hours
-
-You can combine multiple units; for example "5m10s" for 5 minutes and 10
-seconds.
-`
diff --git a/vendor/github.com/BurntSushi/toml/internal/tz.go b/vendor/github.com/BurntSushi/toml/internal/tz.go
deleted file mode 100644
index 022f15bc2b..0000000000
--- a/vendor/github.com/BurntSushi/toml/internal/tz.go
+++ /dev/null
@@ -1,36 +0,0 @@
-package internal
-
-import "time"
-
-// Timezones used for local datetime, date, and time TOML types.
-//
-// The exact way times and dates without a timezone should be interpreted is not
-// well-defined in the TOML specification and left to the implementation. These
-// default to the current local timezone offset of the computer, but this can be
-// changed by changing these variables before decoding.
-//
-// TODO:
-// Ideally we'd like to offer people the ability to configure the used timezone
-// by setting Decoder.Timezone and Encoder.Timezone; however, this is a bit
-// tricky: the reason we use three different variables for this is to support
-// round-tripping – without these specific TZ names we wouldn't know which
-// format to use.
-//
-// There isn't a good way to encode this right now though, and passing this sort
-// of information also ties in to various related issues such as string format
-// encoding, encoding of comments, etc.
-//
-// So, for the time being, just put this in internal until we can write a good
-// comprehensive API for doing all of this.
-//
-// The reason they're exported is that they're referred to from e.g.
-// internal/tag.
-//
-// Note that this behaviour is valid according to the TOML spec, as the exact
-// behaviour is left up to implementations.
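The practical effect of these placeholder zones: a date or time written without an offset decodes into a time.Time whose Location carries one of the names declared just below. A small sketch using only the public API:

    package main

    import (
    	"fmt"
    	"time"

    	"github.com/BurntSushi/toml"
    )

    func main() {
    	var x struct {
    		D time.Time `toml:"d"`
    	}
    	_, err := toml.Decode("d = 2021-05-01", &x)
    	// A date-only value lands in the "date-local" fixed zone.
    	fmt.Println(err, x.D.Location()) // <nil> date-local
    }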
-var ( - localOffset = func() int { _, o := time.Now().Zone(); return o }() - LocalDatetime = time.FixedZone("datetime-local", localOffset) - LocalDate = time.FixedZone("date-local", localOffset) - LocalTime = time.FixedZone("time-local", localOffset) -) diff --git a/vendor/github.com/BurntSushi/toml/lex.go b/vendor/github.com/BurntSushi/toml/lex.go deleted file mode 100644 index 3545a6ad66..0000000000 --- a/vendor/github.com/BurntSushi/toml/lex.go +++ /dev/null @@ -1,1283 +0,0 @@ -package toml - -import ( - "fmt" - "reflect" - "runtime" - "strings" - "unicode" - "unicode/utf8" -) - -type itemType int - -const ( - itemError itemType = iota - itemNIL // used in the parser to indicate no type - itemEOF - itemText - itemString - itemRawString - itemMultilineString - itemRawMultilineString - itemBool - itemInteger - itemFloat - itemDatetime - itemArray // the start of an array - itemArrayEnd - itemTableStart - itemTableEnd - itemArrayTableStart - itemArrayTableEnd - itemKeyStart - itemKeyEnd - itemCommentStart - itemInlineTableStart - itemInlineTableEnd -) - -const eof = 0 - -type stateFn func(lx *lexer) stateFn - -func (p Position) String() string { - return fmt.Sprintf("at line %d; start %d; length %d", p.Line, p.Start, p.Len) -} - -type lexer struct { - input string - start int - pos int - line int - state stateFn - items chan item - tomlNext bool - - // Allow for backing up up to 4 runes. This is necessary because TOML - // contains 3-rune tokens (""" and '''). - prevWidths [4]int - nprev int // how many of prevWidths are in use - atEOF bool // If we emit an eof, we can still back up, but it is not OK to call next again. - - // A stack of state functions used to maintain context. - // - // The idea is to reuse parts of the state machine in various places. For - // example, values can appear at the top level or within arbitrarily nested - // arrays. The last state on the stack is used after a value has been lexed. - // Similarly for comments. - stack []stateFn -} - -type item struct { - typ itemType - val string - err error - pos Position -} - -func (lx *lexer) nextItem() item { - for { - select { - case item := <-lx.items: - return item - default: - lx.state = lx.state(lx) - //fmt.Printf(" STATE %-24s current: %-10s stack: %s\n", lx.state, lx.current(), lx.stack) - } - } -} - -func lex(input string, tomlNext bool) *lexer { - lx := &lexer{ - input: input, - state: lexTop, - items: make(chan item, 10), - stack: make([]stateFn, 0, 10), - line: 1, - tomlNext: tomlNext, - } - return lx -} - -func (lx *lexer) push(state stateFn) { - lx.stack = append(lx.stack, state) -} - -func (lx *lexer) pop() stateFn { - if len(lx.stack) == 0 { - return lx.errorf("BUG in lexer: no states to pop") - } - last := lx.stack[len(lx.stack)-1] - lx.stack = lx.stack[0 : len(lx.stack)-1] - return last -} - -func (lx *lexer) current() string { - return lx.input[lx.start:lx.pos] -} - -func (lx lexer) getPos() Position { - p := Position{ - Line: lx.line, - Start: lx.start, - Len: lx.pos - lx.start, - } - if p.Len <= 0 { - p.Len = 1 - } - return p -} - -func (lx *lexer) emit(typ itemType) { - // Needed for multiline strings ending with an incomplete UTF-8 sequence. 
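For orientation, this is the state-function lexer design familiar from Go's text/template: each state returns the next state, and nextItem above runs the machine only until one token is buffered, so lexing is driven lazily by the consumer. A condensed sketch of that pull model; the names here are illustrative, not taken from this file:

    package main

    import "fmt"

    type token struct{ text string }

    type scanner struct {
    	items chan token
    	state stateFn
    }

    type stateFn func(*scanner) stateFn

    // next runs the machine one state at a time, but only until a token
    // is available in the channel, exactly like nextItem above.
    func (s *scanner) next() token {
    	for {
    		select {
    		case t := <-s.items:
    			return t
    		default:
    			s.state = s.state(s)
    		}
    	}
    }

    func emitHello(s *scanner) stateFn {
    	s.items <- token{"hello"}
    	return emitHello // a real lexer returns nil from its error/EOF states
    }

    func main() {
    	s := &scanner{items: make(chan token, 1), state: emitHello}
    	fmt.Println(s.next().text) // hello
    }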
- if lx.start > lx.pos { - lx.error(errLexUTF8{lx.input[lx.pos]}) - return - } - lx.items <- item{typ: typ, pos: lx.getPos(), val: lx.current()} - lx.start = lx.pos -} - -func (lx *lexer) emitTrim(typ itemType) { - lx.items <- item{typ: typ, pos: lx.getPos(), val: strings.TrimSpace(lx.current())} - lx.start = lx.pos -} - -func (lx *lexer) next() (r rune) { - if lx.atEOF { - panic("BUG in lexer: next called after EOF") - } - if lx.pos >= len(lx.input) { - lx.atEOF = true - return eof - } - - if lx.input[lx.pos] == '\n' { - lx.line++ - } - lx.prevWidths[3] = lx.prevWidths[2] - lx.prevWidths[2] = lx.prevWidths[1] - lx.prevWidths[1] = lx.prevWidths[0] - if lx.nprev < 4 { - lx.nprev++ - } - - r, w := utf8.DecodeRuneInString(lx.input[lx.pos:]) - if r == utf8.RuneError { - lx.error(errLexUTF8{lx.input[lx.pos]}) - return utf8.RuneError - } - - // Note: don't use peek() here, as this calls next(). - if isControl(r) || (r == '\r' && (len(lx.input)-1 == lx.pos || lx.input[lx.pos+1] != '\n')) { - lx.errorControlChar(r) - return utf8.RuneError - } - - lx.prevWidths[0] = w - lx.pos += w - return r -} - -// ignore skips over the pending input before this point. -func (lx *lexer) ignore() { - lx.start = lx.pos -} - -// backup steps back one rune. Can be called 4 times between calls to next. -func (lx *lexer) backup() { - if lx.atEOF { - lx.atEOF = false - return - } - if lx.nprev < 1 { - panic("BUG in lexer: backed up too far") - } - w := lx.prevWidths[0] - lx.prevWidths[0] = lx.prevWidths[1] - lx.prevWidths[1] = lx.prevWidths[2] - lx.prevWidths[2] = lx.prevWidths[3] - lx.nprev-- - - lx.pos -= w - if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' { - lx.line-- - } -} - -// accept consumes the next rune if it's equal to `valid`. -func (lx *lexer) accept(valid rune) bool { - if lx.next() == valid { - return true - } - lx.backup() - return false -} - -// peek returns but does not consume the next rune in the input. -func (lx *lexer) peek() rune { - r := lx.next() - lx.backup() - return r -} - -// skip ignores all input that matches the given predicate. -func (lx *lexer) skip(pred func(rune) bool) { - for { - r := lx.next() - if pred(r) { - continue - } - lx.backup() - lx.ignore() - return - } -} - -// error stops all lexing by emitting an error and returning `nil`. -// -// Note that any value that is a character is escaped if it's a special -// character (newlines, tabs, etc.). -func (lx *lexer) error(err error) stateFn { - if lx.atEOF { - return lx.errorPrevLine(err) - } - lx.items <- item{typ: itemError, pos: lx.getPos(), err: err} - return nil -} - -// errorfPrevline is like error(), but sets the position to the last column of -// the previous line. -// -// This is so that unexpected EOF or NL errors don't show on a new blank line. -func (lx *lexer) errorPrevLine(err error) stateFn { - pos := lx.getPos() - pos.Line-- - pos.Len = 1 - pos.Start = lx.pos - 1 - lx.items <- item{typ: itemError, pos: pos, err: err} - return nil -} - -// errorPos is like error(), but allows explicitly setting the position. -func (lx *lexer) errorPos(start, length int, err error) stateFn { - pos := lx.getPos() - pos.Start = start - pos.Len = length - lx.items <- item{typ: itemError, pos: pos, err: err} - return nil -} - -// errorf is like error, and creates a new error. 
-func (lx *lexer) errorf(format string, values ...interface{}) stateFn { - if lx.atEOF { - pos := lx.getPos() - pos.Line-- - pos.Len = 1 - pos.Start = lx.pos - 1 - lx.items <- item{typ: itemError, pos: pos, err: fmt.Errorf(format, values...)} - return nil - } - lx.items <- item{typ: itemError, pos: lx.getPos(), err: fmt.Errorf(format, values...)} - return nil -} - -func (lx *lexer) errorControlChar(cc rune) stateFn { - return lx.errorPos(lx.pos-1, 1, errLexControl{cc}) -} - -// lexTop consumes elements at the top level of TOML data. -func lexTop(lx *lexer) stateFn { - r := lx.next() - if isWhitespace(r) || isNL(r) { - return lexSkip(lx, lexTop) - } - switch r { - case '#': - lx.push(lexTop) - return lexCommentStart - case '[': - return lexTableStart - case eof: - if lx.pos > lx.start { - return lx.errorf("unexpected EOF") - } - lx.emit(itemEOF) - return nil - } - - // At this point, the only valid item can be a key, so we back up - // and let the key lexer do the rest. - lx.backup() - lx.push(lexTopEnd) - return lexKeyStart -} - -// lexTopEnd is entered whenever a top-level item has been consumed. (A value -// or a table.) It must see only whitespace, and will turn back to lexTop -// upon a newline. If it sees EOF, it will quit the lexer successfully. -func lexTopEnd(lx *lexer) stateFn { - r := lx.next() - switch { - case r == '#': - // a comment will read to a newline for us. - lx.push(lexTop) - return lexCommentStart - case isWhitespace(r): - return lexTopEnd - case isNL(r): - lx.ignore() - return lexTop - case r == eof: - lx.emit(itemEOF) - return nil - } - return lx.errorf( - "expected a top-level item to end with a newline, comment, or EOF, but got %q instead", - r) -} - -// lexTable lexes the beginning of a table. Namely, it makes sure that -// it starts with a character other than '.' and ']'. -// It assumes that '[' has already been consumed. -// It also handles the case that this is an item in an array of tables. -// e.g., '[[name]]'. -func lexTableStart(lx *lexer) stateFn { - if lx.peek() == '[' { - lx.next() - lx.emit(itemArrayTableStart) - lx.push(lexArrayTableEnd) - } else { - lx.emit(itemTableStart) - lx.push(lexTableEnd) - } - return lexTableNameStart -} - -func lexTableEnd(lx *lexer) stateFn { - lx.emit(itemTableEnd) - return lexTopEnd -} - -func lexArrayTableEnd(lx *lexer) stateFn { - if r := lx.next(); r != ']' { - return lx.errorf("expected end of table array name delimiter ']', but got %q instead", r) - } - lx.emit(itemArrayTableEnd) - return lexTopEnd -} - -func lexTableNameStart(lx *lexer) stateFn { - lx.skip(isWhitespace) - switch r := lx.peek(); { - case r == ']' || r == eof: - return lx.errorf("unexpected end of table name (table names cannot be empty)") - case r == '.': - return lx.errorf("unexpected table separator (table names cannot be empty)") - case r == '"' || r == '\'': - lx.ignore() - lx.push(lexTableNameEnd) - return lexQuotedName - default: - lx.push(lexTableNameEnd) - return lexBareName - } -} - -// lexTableNameEnd reads the end of a piece of a table name, optionally -// consuming whitespace. -func lexTableNameEnd(lx *lexer) stateFn { - lx.skip(isWhitespace) - switch r := lx.next(); { - case isWhitespace(r): - return lexTableNameEnd - case r == '.': - lx.ignore() - return lexTableNameStart - case r == ']': - return lx.pop() - default: - return lx.errorf("expected '.' or ']' to end table name, but got %q instead", r) - } -} - -// lexBareName lexes one part of a key or table. 
-//
-// It assumes that at least one valid character for the table has already been
-// read.
-//
-// Lexes only one part, e.g. only 'a' inside 'a.b'.
-func lexBareName(lx *lexer) stateFn {
-	r := lx.next()
-	if isBareKeyChar(r, lx.tomlNext) {
-		return lexBareName
-	}
-	lx.backup()
-	lx.emit(itemText)
-	return lx.pop()
-}
-
-// lexQuotedName lexes one part of a key or table.
-//
-// It assumes that at least one valid character for the table has already been
-// read.
-//
-// Lexes only one part, e.g. only '"a"' inside '"a".b'.
-func lexQuotedName(lx *lexer) stateFn {
-	r := lx.next()
-	switch {
-	case isWhitespace(r):
-		return lexSkip(lx, lexValue)
-	case r == '"':
-		lx.ignore() // ignore the '"'
-		return lexString
-	case r == '\'':
-		lx.ignore() // ignore the "'"
-		return lexRawString
-	case r == eof:
-		return lx.errorf("unexpected EOF; expected value")
-	default:
-		return lx.errorf("expected value but found %q instead", r)
-	}
-}
-
-// lexKeyStart consumes all key parts until a '='.
-func lexKeyStart(lx *lexer) stateFn {
-	lx.skip(isWhitespace)
-	switch r := lx.peek(); {
-	case r == '=' || r == eof:
-		return lx.errorf("unexpected '=': key name appears blank")
-	case r == '.':
-		return lx.errorf("unexpected '.': keys cannot start with a '.'")
-	case r == '"' || r == '\'':
-		lx.ignore()
-		fallthrough
-	default: // Bare key
-		lx.emit(itemKeyStart)
-		return lexKeyNameStart
-	}
-}
-
-func lexKeyNameStart(lx *lexer) stateFn {
-	lx.skip(isWhitespace)
-	switch r := lx.peek(); {
-	case r == '=' || r == eof:
-		return lx.errorf("unexpected '='")
-	case r == '.':
-		return lx.errorf("unexpected '.'")
-	case r == '"' || r == '\'':
-		lx.ignore()
-		lx.push(lexKeyEnd)
-		return lexQuotedName
-	default:
-		lx.push(lexKeyEnd)
-		return lexBareName
-	}
-}
-
-// lexKeyEnd consumes the end of a key and trims whitespace (up to the key
-// separator).
-func lexKeyEnd(lx *lexer) stateFn {
-	lx.skip(isWhitespace)
-	switch r := lx.next(); {
-	case isWhitespace(r):
-		return lexSkip(lx, lexKeyEnd)
-	case r == eof:
-		return lx.errorf("unexpected EOF; expected key separator '='")
-	case r == '.':
-		lx.ignore()
-		return lexKeyNameStart
-	case r == '=':
-		lx.emit(itemKeyEnd)
-		return lexSkip(lx, lexValue)
-	default:
-		return lx.errorf("expected '.' or '=', but got %q instead", r)
-	}
-}
-
-// lexValue starts the consumption of a value anywhere a value is expected.
-// lexValue will ignore whitespace.
-// After a value is lexed, the last state on the stack is popped and returned.
-func lexValue(lx *lexer) stateFn {
-	// We allow whitespace to precede a value, but NOT newlines.
-	// In array syntax, the array states are responsible for ignoring newlines.
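Put together, bare, quoted, and dotted key parts decode into nested tables, and a quoted part keeps its dot rather than splitting. A small sketch against the public API:

    package main

    import (
    	"fmt"

    	"github.com/BurntSushi/toml"
    )

    func main() {
    	var v map[string]interface{}
    	_, _ = toml.Decode(`a.b."c.d" = 1`, &v)
    	a := v["a"].(map[string]interface{})
    	b := a["b"].(map[string]interface{})
    	fmt.Println(b["c.d"]) // 1: the quoted part is a single key component
    }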
- r := lx.next() - switch { - case isWhitespace(r): - return lexSkip(lx, lexValue) - case isDigit(r): - lx.backup() // avoid an extra state and use the same as above - return lexNumberOrDateStart - } - switch r { - case '[': - lx.ignore() - lx.emit(itemArray) - return lexArrayValue - case '{': - lx.ignore() - lx.emit(itemInlineTableStart) - return lexInlineTableValue - case '"': - if lx.accept('"') { - if lx.accept('"') { - lx.ignore() // Ignore """ - return lexMultilineString - } - lx.backup() - } - lx.ignore() // ignore the '"' - return lexString - case '\'': - if lx.accept('\'') { - if lx.accept('\'') { - lx.ignore() // Ignore """ - return lexMultilineRawString - } - lx.backup() - } - lx.ignore() // ignore the "'" - return lexRawString - case '.': // special error case, be kind to users - return lx.errorf("floats must start with a digit, not '.'") - case 'i', 'n': - if (lx.accept('n') && lx.accept('f')) || (lx.accept('a') && lx.accept('n')) { - lx.emit(itemFloat) - return lx.pop() - } - case '-', '+': - return lexDecimalNumberStart - } - if unicode.IsLetter(r) { - // Be permissive here; lexBool will give a nice error if the - // user wrote something like - // x = foo - // (i.e. not 'true' or 'false' but is something else word-like.) - lx.backup() - return lexBool - } - if r == eof { - return lx.errorf("unexpected EOF; expected value") - } - return lx.errorf("expected value but found %q instead", r) -} - -// lexArrayValue consumes one value in an array. It assumes that '[' or ',' -// have already been consumed. All whitespace and newlines are ignored. -func lexArrayValue(lx *lexer) stateFn { - r := lx.next() - switch { - case isWhitespace(r) || isNL(r): - return lexSkip(lx, lexArrayValue) - case r == '#': - lx.push(lexArrayValue) - return lexCommentStart - case r == ',': - return lx.errorf("unexpected comma") - case r == ']': - return lexArrayEnd - } - - lx.backup() - lx.push(lexArrayValueEnd) - return lexValue -} - -// lexArrayValueEnd consumes everything between the end of an array value and -// the next value (or the end of the array): it ignores whitespace and newlines -// and expects either a ',' or a ']'. -func lexArrayValueEnd(lx *lexer) stateFn { - switch r := lx.next(); { - case isWhitespace(r) || isNL(r): - return lexSkip(lx, lexArrayValueEnd) - case r == '#': - lx.push(lexArrayValueEnd) - return lexCommentStart - case r == ',': - lx.ignore() - return lexArrayValue // move on to the next value - case r == ']': - return lexArrayEnd - default: - return lx.errorf("expected a comma (',') or array terminator (']'), but got %s", runeOrEOF(r)) - } -} - -// lexArrayEnd finishes the lexing of an array. -// It assumes that a ']' has just been consumed. -func lexArrayEnd(lx *lexer) stateFn { - lx.ignore() - lx.emit(itemArrayEnd) - return lx.pop() -} - -// lexInlineTableValue consumes one key/value pair in an inline table. -// It assumes that '{' or ',' have already been consumed. Whitespace is ignored. 
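lexValue is the single dispatch point for every value form; one short document decoded through the public API exercises most of its branches. A sketch:

    package main

    import (
    	"fmt"

    	"github.com/BurntSushi/toml"
    )

    func main() {
    	var v map[string]interface{}
    	_, err := toml.Decode(`
    s  = 'literal string'
    m  = """multi
    line"""
    xs = [1, 2, 3]
    t  = {x = 1}
    f  = inf
    ok = true
    `, &v)
    	// v["f"].(float64) is +Inf; v["xs"].([]interface{}) holds int64s;
    	// v["t"] is a map[string]interface{}.
    	fmt.Println(err)
    }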
-func lexInlineTableValue(lx *lexer) stateFn { - r := lx.next() - switch { - case isWhitespace(r): - return lexSkip(lx, lexInlineTableValue) - case isNL(r): - if lx.tomlNext { - return lexSkip(lx, lexInlineTableValue) - } - return lx.errorPrevLine(errLexInlineTableNL{}) - case r == '#': - lx.push(lexInlineTableValue) - return lexCommentStart - case r == ',': - return lx.errorf("unexpected comma") - case r == '}': - return lexInlineTableEnd - } - lx.backup() - lx.push(lexInlineTableValueEnd) - return lexKeyStart -} - -// lexInlineTableValueEnd consumes everything between the end of an inline table -// key/value pair and the next pair (or the end of the table): -// it ignores whitespace and expects either a ',' or a '}'. -func lexInlineTableValueEnd(lx *lexer) stateFn { - switch r := lx.next(); { - case isWhitespace(r): - return lexSkip(lx, lexInlineTableValueEnd) - case isNL(r): - if lx.tomlNext { - return lexSkip(lx, lexInlineTableValueEnd) - } - return lx.errorPrevLine(errLexInlineTableNL{}) - case r == '#': - lx.push(lexInlineTableValueEnd) - return lexCommentStart - case r == ',': - lx.ignore() - lx.skip(isWhitespace) - if lx.peek() == '}' { - if lx.tomlNext { - return lexInlineTableValueEnd - } - return lx.errorf("trailing comma not allowed in inline tables") - } - return lexInlineTableValue - case r == '}': - return lexInlineTableEnd - default: - return lx.errorf("expected a comma or an inline table terminator '}', but got %s instead", runeOrEOF(r)) - } -} - -func runeOrEOF(r rune) string { - if r == eof { - return "end of file" - } - return "'" + string(r) + "'" -} - -// lexInlineTableEnd finishes the lexing of an inline table. -// It assumes that a '}' has just been consumed. -func lexInlineTableEnd(lx *lexer) stateFn { - lx.ignore() - lx.emit(itemInlineTableEnd) - return lx.pop() -} - -// lexString consumes the inner contents of a string. It assumes that the -// beginning '"' has already been consumed and ignored. -func lexString(lx *lexer) stateFn { - r := lx.next() - switch { - case r == eof: - return lx.errorf(`unexpected EOF; expected '"'`) - case isNL(r): - return lx.errorPrevLine(errLexStringNL{}) - case r == '\\': - lx.push(lexString) - return lexStringEscape - case r == '"': - lx.backup() - lx.emit(itemString) - lx.next() - lx.ignore() - return lx.pop() - } - return lexString -} - -// lexMultilineString consumes the inner contents of a string. It assumes that -// the beginning '"""' has already been consumed and ignored. -func lexMultilineString(lx *lexer) stateFn { - r := lx.next() - switch r { - default: - return lexMultilineString - case eof: - return lx.errorf(`unexpected EOF; expected '"""'`) - case '\\': - return lexMultilineStringEscape - case '"': - /// Found " → try to read two more "". - if lx.accept('"') { - if lx.accept('"') { - /// Peek ahead: the string can contain " and "", including at the - /// end: """str""""" - /// 6 or more at the end, however, is an error. - if lx.peek() == '"' { - /// Check if we already lexed 5 's; if so we have 6 now, and - /// that's just too many man! - /// - /// Second check is for the edge case: - /// - /// two quotes allowed. - /// vv - /// """lol \"""""" - /// ^^ ^^^---- closing three - /// escaped - /// - /// But ugly, but it works - if strings.HasSuffix(lx.current(), `"""""`) && !strings.HasSuffix(lx.current(), `\"""""`) { - return lx.errorf(`unexpected '""""""'`) - } - lx.backup() - lx.backup() - return lexMultilineString - } - - lx.backup() /// backup: don't include the """ in the item. 
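These tomlNext branches are gated on the BURNTSUSHI_TOML_110 environment variable, which parse() reads later in this diff; with it set, TOML 1.1 niceties such as newlines and trailing commas inside inline tables are accepted. A hedged sketch, specific to this vendored version of the library:

    package main

    import (
    	"fmt"
    	"os"

    	"github.com/BurntSushi/toml"
    )

    func main() {
    	// Only the presence of the variable matters; the value is ignored.
    	os.Setenv("BURNTSUSHI_TOML_110", "")
    	var v map[string]interface{}
    	_, err := toml.Decode("t = {\n  a = 1,\n}", &v)
    	fmt.Println(err) // <nil> with the variable set; a parse error without it
    }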
- lx.backup() - lx.backup() - lx.emit(itemMultilineString) - lx.next() /// Read over ''' again and discard it. - lx.next() - lx.next() - lx.ignore() - return lx.pop() - } - lx.backup() - } - return lexMultilineString - } -} - -// lexRawString consumes a raw string. Nothing can be escaped in such a string. -// It assumes that the beginning "'" has already been consumed and ignored. -func lexRawString(lx *lexer) stateFn { - r := lx.next() - switch { - default: - return lexRawString - case r == eof: - return lx.errorf(`unexpected EOF; expected "'"`) - case isNL(r): - return lx.errorPrevLine(errLexStringNL{}) - case r == '\'': - lx.backup() - lx.emit(itemRawString) - lx.next() - lx.ignore() - return lx.pop() - } -} - -// lexMultilineRawString consumes a raw string. Nothing can be escaped in such a -// string. It assumes that the beginning triple-' has already been consumed and -// ignored. -func lexMultilineRawString(lx *lexer) stateFn { - r := lx.next() - switch r { - default: - return lexMultilineRawString - case eof: - return lx.errorf(`unexpected EOF; expected "'''"`) - case '\'': - /// Found ' → try to read two more ''. - if lx.accept('\'') { - if lx.accept('\'') { - /// Peek ahead: the string can contain ' and '', including at the - /// end: '''str''''' - /// 6 or more at the end, however, is an error. - if lx.peek() == '\'' { - /// Check if we already lexed 5 's; if so we have 6 now, and - /// that's just too many man! - if strings.HasSuffix(lx.current(), "'''''") { - return lx.errorf(`unexpected "''''''"`) - } - lx.backup() - lx.backup() - return lexMultilineRawString - } - - lx.backup() /// backup: don't include the ''' in the item. - lx.backup() - lx.backup() - lx.emit(itemRawMultilineString) - lx.next() /// Read over ''' again and discard it. - lx.next() - lx.next() - lx.ignore() - return lx.pop() - } - lx.backup() - } - return lexMultilineRawString - } -} - -// lexMultilineStringEscape consumes an escaped character. It assumes that the -// preceding '\\' has already been consumed. -func lexMultilineStringEscape(lx *lexer) stateFn { - if isNL(lx.next()) { /// \ escaping newline. - return lexMultilineString - } - lx.backup() - lx.push(lexMultilineString) - return lexStringEscape(lx) -} - -func lexStringEscape(lx *lexer) stateFn { - r := lx.next() - switch r { - case 'e': - if !lx.tomlNext { - return lx.error(errLexEscape{r}) - } - fallthrough - case 'b': - fallthrough - case 't': - fallthrough - case 'n': - fallthrough - case 'f': - fallthrough - case 'r': - fallthrough - case '"': - fallthrough - case ' ', '\t': - // Inside """ .. """ strings you can use \ to escape newlines, and any - // amount of whitespace can be between the \ and \n. 
- fallthrough - case '\\': - return lx.pop() - case 'x': - if !lx.tomlNext { - return lx.error(errLexEscape{r}) - } - return lexHexEscape - case 'u': - return lexShortUnicodeEscape - case 'U': - return lexLongUnicodeEscape - } - return lx.error(errLexEscape{r}) -} - -func lexHexEscape(lx *lexer) stateFn { - var r rune - for i := 0; i < 2; i++ { - r = lx.next() - if !isHexadecimal(r) { - return lx.errorf( - `expected two hexadecimal digits after '\x', but got %q instead`, - lx.current()) - } - } - return lx.pop() -} - -func lexShortUnicodeEscape(lx *lexer) stateFn { - var r rune - for i := 0; i < 4; i++ { - r = lx.next() - if !isHexadecimal(r) { - return lx.errorf( - `expected four hexadecimal digits after '\u', but got %q instead`, - lx.current()) - } - } - return lx.pop() -} - -func lexLongUnicodeEscape(lx *lexer) stateFn { - var r rune - for i := 0; i < 8; i++ { - r = lx.next() - if !isHexadecimal(r) { - return lx.errorf( - `expected eight hexadecimal digits after '\U', but got %q instead`, - lx.current()) - } - } - return lx.pop() -} - -// lexNumberOrDateStart processes the first character of a value which begins -// with a digit. It exists to catch values starting with '0', so that -// lexBaseNumberOrDate can differentiate base prefixed integers from other -// types. -func lexNumberOrDateStart(lx *lexer) stateFn { - r := lx.next() - switch r { - case '0': - return lexBaseNumberOrDate - } - - if !isDigit(r) { - // The only way to reach this state is if the value starts - // with a digit, so specifically treat anything else as an - // error. - return lx.errorf("expected a digit but got %q", r) - } - - return lexNumberOrDate -} - -// lexNumberOrDate consumes either an integer, float or datetime. -func lexNumberOrDate(lx *lexer) stateFn { - r := lx.next() - if isDigit(r) { - return lexNumberOrDate - } - switch r { - case '-', ':': - return lexDatetime - case '_': - return lexDecimalNumber - case '.', 'e', 'E': - return lexFloat - } - - lx.backup() - lx.emit(itemInteger) - return lx.pop() -} - -// lexDatetime consumes a Datetime, to a first approximation. -// The parser validates that it matches one of the accepted formats. -func lexDatetime(lx *lexer) stateFn { - r := lx.next() - if isDigit(r) { - return lexDatetime - } - switch r { - case '-', ':', 'T', 't', ' ', '.', 'Z', 'z', '+': - return lexDatetime - } - - lx.backup() - lx.emitTrim(itemDatetime) - return lx.pop() -} - -// lexHexInteger consumes a hexadecimal integer after seeing the '0x' prefix. -func lexHexInteger(lx *lexer) stateFn { - r := lx.next() - if isHexadecimal(r) { - return lexHexInteger - } - switch r { - case '_': - return lexHexInteger - } - - lx.backup() - lx.emit(itemInteger) - return lx.pop() -} - -// lexOctalInteger consumes an octal integer after seeing the '0o' prefix. -func lexOctalInteger(lx *lexer) stateFn { - r := lx.next() - if isOctal(r) { - return lexOctalInteger - } - switch r { - case '_': - return lexOctalInteger - } - - lx.backup() - lx.emit(itemInteger) - return lx.pop() -} - -// lexBinaryInteger consumes a binary integer after seeing the '0b' prefix. -func lexBinaryInteger(lx *lexer) stateFn { - r := lx.next() - if isBinary(r) { - return lexBinaryInteger - } - switch r { - case '_': - return lexBinaryInteger - } - - lx.backup() - lx.emit(itemInteger) - return lx.pop() -} - -// lexDecimalNumber consumes a decimal float or integer. 
-func lexDecimalNumber(lx *lexer) stateFn {
-	r := lx.next()
-	if isDigit(r) {
-		return lexDecimalNumber
-	}
-	switch r {
-	case '.', 'e', 'E':
-		return lexFloat
-	case '_':
-		return lexDecimalNumber
-	}
-
-	lx.backup()
-	lx.emit(itemInteger)
-	return lx.pop()
-}
-
-// lexDecimalNumberStart consumes the first digit of a number beginning with a
-// sign. It assumes the sign has already been consumed. Values which start with
-// a sign are only allowed to be decimal integers or floats.
-//
-// The special "nan" and "inf" values are also recognized.
-func lexDecimalNumberStart(lx *lexer) stateFn {
-	r := lx.next()
-
-	// Special error cases to give users better error messages
-	switch r {
-	case 'i':
-		if !lx.accept('n') || !lx.accept('f') {
-			return lx.errorf("invalid float: '%s'", lx.current())
-		}
-		lx.emit(itemFloat)
-		return lx.pop()
-	case 'n':
-		if !lx.accept('a') || !lx.accept('n') {
-			return lx.errorf("invalid float: '%s'", lx.current())
-		}
-		lx.emit(itemFloat)
-		return lx.pop()
-	case '0':
-		p := lx.peek()
-		switch p {
-		case 'b', 'o', 'x':
-			return lx.errorf("cannot use sign with non-decimal numbers: '%s%c'", lx.current(), p)
-		}
-	case '.':
-		return lx.errorf("floats must start with a digit, not '.'")
-	}
-
-	if isDigit(r) {
-		return lexDecimalNumber
-	}
-
-	return lx.errorf("expected a digit but got %q", r)
-}
-
-// lexBaseNumberOrDate differentiates between the possible values which
-// start with '0'. It assumes that before reaching this state, the initial '0'
-// has been consumed.
-func lexBaseNumberOrDate(lx *lexer) stateFn {
-	r := lx.next()
-	// Note: All datetimes start with at least two digits, so we don't
-	// handle date characters (':', '-', etc.) here.
-	if isDigit(r) {
-		return lexNumberOrDate
-	}
-	switch r {
-	case '_':
-		// Can only be decimal, because there can't be an underscore
-		// between the '0' and the base designator, and dates can't
-		// contain underscores.
-		return lexDecimalNumber
-	case '.', 'e', 'E':
-		return lexFloat
-	case 'b':
-		r = lx.peek()
-		if !isBinary(r) {
-			lx.errorf("not a binary number: '%s%c'", lx.current(), r)
-		}
-		return lexBinaryInteger
-	case 'o':
-		r = lx.peek()
-		if !isOctal(r) {
-			lx.errorf("not an octal number: '%s%c'", lx.current(), r)
-		}
-		return lexOctalInteger
-	case 'x':
-		r = lx.peek()
-		if !isHexadecimal(r) {
-			lx.errorf("not a hexadecimal number: '%s%c'", lx.current(), r)
-		}
-		return lexHexInteger
-	}
-
-	lx.backup()
-	lx.emit(itemInteger)
-	return lx.pop()
-}
-
-// lexFloat consumes the elements of a float. It allows any sequence of
-// float-like characters, so floats emitted by the lexer are only a first
-// approximation and must be validated by the parser.
-func lexFloat(lx *lexer) stateFn {
-	r := lx.next()
-	if isDigit(r) {
-		return lexFloat
-	}
-	switch r {
-	case '_', '.', '-', '+', 'e', 'E':
-		return lexFloat
-	}
-
-	lx.backup()
-	lx.emit(itemFloat)
-	return lx.pop()
-}
-
-// lexBool consumes a bool string: 'true' or 'false'.
-func lexBool(lx *lexer) stateFn {
-	var rs []rune
-	for {
-		r := lx.next()
-		if !unicode.IsLetter(r) {
-			lx.backup()
-			break
-		}
-		rs = append(rs, r)
-	}
-	s := string(rs)
-	switch s {
-	case "true", "false":
-		lx.emit(itemBool)
-		return lx.pop()
-	}
-	return lx.errorf("expected value but found %q instead", s)
-}
-
-// lexCommentStart begins the lexing of a comment. It will emit
-// itemCommentStart and consume no characters, passing control to lexComment.
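The number states above accept the full range of TOML integer and float spellings; strconv with base 0 then resolves the base prefixes and underscores during parsing. A quick sketch via the public API:

    package main

    import (
    	"fmt"

    	"github.com/BurntSushi/toml"
    )

    func main() {
    	var n struct {
    		A, B, C, D int64
    		E          float64
    	}
    	_, err := toml.Decode(`
    a = 0xDEAD_BEEF   # hex, with readability underscores
    b = 0o755         # octal
    c = 0b1010        # binary
    d = 1_000_000     # decimal with underscores
    e = -inf          # floats support inf and nan
    `, &n)
    	fmt.Println(err, n.A, n.B, n.C, n.D, n.E) // <nil> 3735928559 493 10 1000000 -Inf
    }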
-func lexCommentStart(lx *lexer) stateFn { - lx.ignore() - lx.emit(itemCommentStart) - return lexComment -} - -// lexComment lexes an entire comment. It assumes that '#' has been consumed. -// It will consume *up to* the first newline character, and pass control -// back to the last state on the stack. -func lexComment(lx *lexer) stateFn { - switch r := lx.next(); { - case isNL(r) || r == eof: - lx.backup() - lx.emit(itemText) - return lx.pop() - default: - return lexComment - } -} - -// lexSkip ignores all slurped input and moves on to the next state. -func lexSkip(lx *lexer, nextState stateFn) stateFn { - lx.ignore() - return nextState -} - -func (s stateFn) String() string { - name := runtime.FuncForPC(reflect.ValueOf(s).Pointer()).Name() - if i := strings.LastIndexByte(name, '.'); i > -1 { - name = name[i+1:] - } - if s == nil { - name = "" - } - return name + "()" -} - -func (itype itemType) String() string { - switch itype { - case itemError: - return "Error" - case itemNIL: - return "NIL" - case itemEOF: - return "EOF" - case itemText: - return "Text" - case itemString, itemRawString, itemMultilineString, itemRawMultilineString: - return "String" - case itemBool: - return "Bool" - case itemInteger: - return "Integer" - case itemFloat: - return "Float" - case itemDatetime: - return "DateTime" - case itemTableStart: - return "TableStart" - case itemTableEnd: - return "TableEnd" - case itemKeyStart: - return "KeyStart" - case itemKeyEnd: - return "KeyEnd" - case itemArray: - return "Array" - case itemArrayEnd: - return "ArrayEnd" - case itemCommentStart: - return "CommentStart" - case itemInlineTableStart: - return "InlineTableStart" - case itemInlineTableEnd: - return "InlineTableEnd" - } - panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype))) -} - -func (item item) String() string { - return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val) -} - -func isWhitespace(r rune) bool { return r == '\t' || r == ' ' } -func isNL(r rune) bool { return r == '\n' || r == '\r' } -func isControl(r rune) bool { // Control characters except \t, \r, \n - switch r { - case '\t', '\r', '\n': - return false - default: - return (r >= 0x00 && r <= 0x1f) || r == 0x7f - } -} -func isDigit(r rune) bool { return r >= '0' && r <= '9' } -func isBinary(r rune) bool { return r == '0' || r == '1' } -func isOctal(r rune) bool { return r >= '0' && r <= '7' } -func isHexadecimal(r rune) bool { - return (r >= '0' && r <= '9') || (r >= 'a' && r <= 'f') || (r >= 'A' && r <= 'F') -} - -func isBareKeyChar(r rune, tomlNext bool) bool { - if tomlNext { - return (r >= 'A' && r <= 'Z') || - (r >= 'a' && r <= 'z') || - (r >= '0' && r <= '9') || - r == '_' || r == '-' || - r == 0xb2 || r == 0xb3 || r == 0xb9 || (r >= 0xbc && r <= 0xbe) || - (r >= 0xc0 && r <= 0xd6) || (r >= 0xd8 && r <= 0xf6) || (r >= 0xf8 && r <= 0x037d) || - (r >= 0x037f && r <= 0x1fff) || - (r >= 0x200c && r <= 0x200d) || (r >= 0x203f && r <= 0x2040) || - (r >= 0x2070 && r <= 0x218f) || (r >= 0x2460 && r <= 0x24ff) || - (r >= 0x2c00 && r <= 0x2fef) || (r >= 0x3001 && r <= 0xd7ff) || - (r >= 0xf900 && r <= 0xfdcf) || (r >= 0xfdf0 && r <= 0xfffd) || - (r >= 0x10000 && r <= 0xeffff) - } - - return (r >= 'A' && r <= 'Z') || - (r >= 'a' && r <= 'z') || - (r >= '0' && r <= '9') || - r == '_' || r == '-' -} diff --git a/vendor/github.com/BurntSushi/toml/meta.go b/vendor/github.com/BurntSushi/toml/meta.go deleted file mode 100644 index 2e78b24e95..0000000000 --- a/vendor/github.com/BurntSushi/toml/meta.go +++ /dev/null @@ -1,121 +0,0 @@ -package toml - 
-import ( - "strings" -) - -// MetaData allows access to meta information about TOML data that's not -// accessible otherwise. -// -// It allows checking if a key is defined in the TOML data, whether any keys -// were undecoded, and the TOML type of a key. -type MetaData struct { - context Key // Used only during decoding. - - keyInfo map[string]keyInfo - mapping map[string]interface{} - keys []Key - decoded map[string]struct{} - data []byte // Input file; for errors. -} - -// IsDefined reports if the key exists in the TOML data. -// -// The key should be specified hierarchically, for example to access the TOML -// key "a.b.c" you would use IsDefined("a", "b", "c"). Keys are case sensitive. -// -// Returns false for an empty key. -func (md *MetaData) IsDefined(key ...string) bool { - if len(key) == 0 { - return false - } - - var ( - hash map[string]interface{} - ok bool - hashOrVal interface{} = md.mapping - ) - for _, k := range key { - if hash, ok = hashOrVal.(map[string]interface{}); !ok { - return false - } - if hashOrVal, ok = hash[k]; !ok { - return false - } - } - return true -} - -// Type returns a string representation of the type of the key specified. -// -// Type will return the empty string if given an empty key or a key that does -// not exist. Keys are case sensitive. -func (md *MetaData) Type(key ...string) string { - if ki, ok := md.keyInfo[Key(key).String()]; ok { - return ki.tomlType.typeString() - } - return "" -} - -// Keys returns a slice of every key in the TOML data, including key groups. -// -// Each key is itself a slice, where the first element is the top of the -// hierarchy and the last is the most specific. The list will have the same -// order as the keys appeared in the TOML data. -// -// All keys returned are non-empty. -func (md *MetaData) Keys() []Key { - return md.keys -} - -// Undecoded returns all keys that have not been decoded in the order in which -// they appear in the original TOML document. -// -// This includes keys that haven't been decoded because of a [Primitive] value. -// Once the Primitive value is decoded, the keys will be considered decoded. -// -// Also note that decoding into an empty interface will result in no decoding, -// and so no keys will be considered decoded. -// -// In this sense, the Undecoded keys correspond to keys in the TOML document -// that do not have a concrete type in your representation. -func (md *MetaData) Undecoded() []Key { - undecoded := make([]Key, 0, len(md.keys)) - for _, key := range md.keys { - if _, ok := md.decoded[key.String()]; !ok { - undecoded = append(undecoded, key) - } - } - return undecoded -} - -// Key represents any TOML key, including key groups. Use [MetaData.Keys] to get -// values of this type. 
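These MetaData accessors are the supported way to introspect what was parsed versus what was actually decoded into your value. A short sketch of typical use:

    package main

    import (
    	"fmt"

    	"github.com/BurntSushi/toml"
    )

    func main() {
    	var v struct {
    		Title string `toml:"title"`
    	}
    	md, _ := toml.Decode("title = \"x\"\n[owner]\nname = \"amy\"\n", &v)
    	fmt.Println(md.IsDefined("owner", "name")) // true
    	fmt.Println(md.Type("owner", "name"))      // String
    	fmt.Println(md.Undecoded())                // [owner owner.name]; no struct field matched them
    }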
-type Key []string - -func (k Key) String() string { - ss := make([]string, len(k)) - for i := range k { - ss[i] = k.maybeQuoted(i) - } - return strings.Join(ss, ".") -} - -func (k Key) maybeQuoted(i int) string { - if k[i] == "" { - return `""` - } - for _, c := range k[i] { - if !isBareKeyChar(c, false) { - return `"` + dblQuotedReplacer.Replace(k[i]) + `"` - } - } - return k[i] -} - -func (k Key) add(piece string) Key { - newKey := make(Key, len(k)+1) - copy(newKey, k) - newKey[len(k)] = piece - return newKey -} diff --git a/vendor/github.com/BurntSushi/toml/parse.go b/vendor/github.com/BurntSushi/toml/parse.go deleted file mode 100644 index 9c19153698..0000000000 --- a/vendor/github.com/BurntSushi/toml/parse.go +++ /dev/null @@ -1,811 +0,0 @@ -package toml - -import ( - "fmt" - "os" - "strconv" - "strings" - "time" - "unicode/utf8" - - "github.com/BurntSushi/toml/internal" -) - -type parser struct { - lx *lexer - context Key // Full key for the current hash in scope. - currentKey string // Base key name for everything except hashes. - pos Position // Current position in the TOML file. - tomlNext bool - - ordered []Key // List of keys in the order that they appear in the TOML data. - - keyInfo map[string]keyInfo // Map keyname → info about the TOML key. - mapping map[string]interface{} // Map keyname → key value. - implicits map[string]struct{} // Record implicit keys (e.g. "key.group.names"). -} - -type keyInfo struct { - pos Position - tomlType tomlType -} - -func parse(data string) (p *parser, err error) { - _, tomlNext := os.LookupEnv("BURNTSUSHI_TOML_110") - - defer func() { - if r := recover(); r != nil { - if pErr, ok := r.(ParseError); ok { - pErr.input = data - err = pErr - return - } - panic(r) - } - }() - - // Read over BOM; do this here as the lexer calls utf8.DecodeRuneInString() - // which mangles stuff. UTF-16 BOM isn't strictly valid, but some tools add - // it anyway. - if strings.HasPrefix(data, "\xff\xfe") || strings.HasPrefix(data, "\xfe\xff") { // UTF-16 - data = data[2:] - } else if strings.HasPrefix(data, "\xef\xbb\xbf") { // UTF-8 - data = data[3:] - } - - // Examine first few bytes for NULL bytes; this probably means it's a UTF-16 - // file (second byte in surrogate pair being NULL). Again, do this here to - // avoid having to deal with UTF-8/16 stuff in the lexer. 
- ex := 6 - if len(data) < 6 { - ex = len(data) - } - if i := strings.IndexRune(data[:ex], 0); i > -1 { - return nil, ParseError{ - Message: "files cannot contain NULL bytes; probably using UTF-16; TOML files must be UTF-8", - Position: Position{Line: 1, Start: i, Len: 1}, - Line: 1, - input: data, - } - } - - p = &parser{ - keyInfo: make(map[string]keyInfo), - mapping: make(map[string]interface{}), - lx: lex(data, tomlNext), - ordered: make([]Key, 0), - implicits: make(map[string]struct{}), - tomlNext: tomlNext, - } - for { - item := p.next() - if item.typ == itemEOF { - break - } - p.topLevel(item) - } - - return p, nil -} - -func (p *parser) panicErr(it item, err error) { - panic(ParseError{ - err: err, - Position: it.pos, - Line: it.pos.Len, - LastKey: p.current(), - }) -} - -func (p *parser) panicItemf(it item, format string, v ...interface{}) { - panic(ParseError{ - Message: fmt.Sprintf(format, v...), - Position: it.pos, - Line: it.pos.Len, - LastKey: p.current(), - }) -} - -func (p *parser) panicf(format string, v ...interface{}) { - panic(ParseError{ - Message: fmt.Sprintf(format, v...), - Position: p.pos, - Line: p.pos.Line, - LastKey: p.current(), - }) -} - -func (p *parser) next() item { - it := p.lx.nextItem() - //fmt.Printf("ITEM %-18s line %-3d │ %q\n", it.typ, it.pos.Line, it.val) - if it.typ == itemError { - if it.err != nil { - panic(ParseError{ - Position: it.pos, - Line: it.pos.Line, - LastKey: p.current(), - err: it.err, - }) - } - - p.panicItemf(it, "%s", it.val) - } - return it -} - -func (p *parser) nextPos() item { - it := p.next() - p.pos = it.pos - return it -} - -func (p *parser) bug(format string, v ...interface{}) { - panic(fmt.Sprintf("BUG: "+format+"\n\n", v...)) -} - -func (p *parser) expect(typ itemType) item { - it := p.next() - p.assertEqual(typ, it.typ) - return it -} - -func (p *parser) assertEqual(expected, got itemType) { - if expected != got { - p.bug("Expected '%s' but got '%s'.", expected, got) - } -} - -func (p *parser) topLevel(item item) { - switch item.typ { - case itemCommentStart: // # .. - p.expect(itemText) - case itemTableStart: // [ .. ] - name := p.nextPos() - - var key Key - for ; name.typ != itemTableEnd && name.typ != itemEOF; name = p.next() { - key = append(key, p.keyString(name)) - } - p.assertEqual(itemTableEnd, name.typ) - - p.addContext(key, false) - p.setType("", tomlHash, item.pos) - p.ordered = append(p.ordered, key) - case itemArrayTableStart: // [[ .. ]] - name := p.nextPos() - - var key Key - for ; name.typ != itemArrayTableEnd && name.typ != itemEOF; name = p.next() { - key = append(key, p.keyString(name)) - } - p.assertEqual(itemArrayTableEnd, name.typ) - - p.addContext(key, true) - p.setType("", tomlArrayHash, item.pos) - p.ordered = append(p.ordered, key) - case itemKeyStart: // key = .. - outerContext := p.context - /// Read all the key parts (e.g. 'a' and 'b' in 'a.b') - k := p.nextPos() - var key Key - for ; k.typ != itemKeyEnd && k.typ != itemEOF; k = p.next() { - key = append(key, p.keyString(k)) - } - p.assertEqual(itemKeyEnd, k.typ) - - /// The current key is the last part. - p.currentKey = key[len(key)-1] - - /// All the other parts (if any) are the context; need to set each part - /// as implicit. - context := key[:len(key)-1] - for i := range context { - p.addImplicitContext(append(p.context, context[i:i+1]...)) - } - p.ordered = append(p.ordered, p.context.add(p.currentKey)) - - /// Set value. 
- vItem := p.next() - val, typ := p.value(vItem, false) - p.set(p.currentKey, val, typ, vItem.pos) - - /// Remove the context we added (preserving any context from [tbl] lines). - p.context = outerContext - p.currentKey = "" - default: - p.bug("Unexpected type at top level: %s", item.typ) - } -} - -// Gets a string for a key (or part of a key in a table name). -func (p *parser) keyString(it item) string { - switch it.typ { - case itemText: - return it.val - case itemString, itemMultilineString, - itemRawString, itemRawMultilineString: - s, _ := p.value(it, false) - return s.(string) - default: - p.bug("Unexpected key type: %s", it.typ) - } - panic("unreachable") -} - -var datetimeRepl = strings.NewReplacer( - "z", "Z", - "t", "T", - " ", "T") - -// value translates an expected value from the lexer into a Go value wrapped -// as an empty interface. -func (p *parser) value(it item, parentIsArray bool) (interface{}, tomlType) { - switch it.typ { - case itemString: - return p.replaceEscapes(it, it.val), p.typeOfPrimitive(it) - case itemMultilineString: - return p.replaceEscapes(it, p.stripEscapedNewlines(stripFirstNewline(it.val))), p.typeOfPrimitive(it) - case itemRawString: - return it.val, p.typeOfPrimitive(it) - case itemRawMultilineString: - return stripFirstNewline(it.val), p.typeOfPrimitive(it) - case itemInteger: - return p.valueInteger(it) - case itemFloat: - return p.valueFloat(it) - case itemBool: - switch it.val { - case "true": - return true, p.typeOfPrimitive(it) - case "false": - return false, p.typeOfPrimitive(it) - default: - p.bug("Expected boolean value, but got '%s'.", it.val) - } - case itemDatetime: - return p.valueDatetime(it) - case itemArray: - return p.valueArray(it) - case itemInlineTableStart: - return p.valueInlineTable(it, parentIsArray) - default: - p.bug("Unexpected value type: %s", it.typ) - } - panic("unreachable") -} - -func (p *parser) valueInteger(it item) (interface{}, tomlType) { - if !numUnderscoresOK(it.val) { - p.panicItemf(it, "Invalid integer %q: underscores must be surrounded by digits", it.val) - } - if numHasLeadingZero(it.val) { - p.panicItemf(it, "Invalid integer %q: cannot have leading zeroes", it.val) - } - - num, err := strconv.ParseInt(it.val, 0, 64) - if err != nil { - // Distinguish integer values. Normally, it'd be a bug if the lexer - // provides an invalid integer, but it's possible that the number is - // out of range of valid values (which the lexer cannot determine). - // So mark the former as a bug but the latter as a legitimate user - // error. - if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange { - p.panicErr(it, errParseRange{i: it.val, size: "int64"}) - } else { - p.bug("Expected integer value, but got '%s'.", it.val) - } - } - return num, p.typeOfPrimitive(it) -} - -func (p *parser) valueFloat(it item) (interface{}, tomlType) { - parts := strings.FieldsFunc(it.val, func(r rune) bool { - switch r { - case '.', 'e', 'E': - return true - } - return false - }) - for _, part := range parts { - if !numUnderscoresOK(part) { - p.panicItemf(it, "Invalid float %q: underscores must be surrounded by digits", it.val) - } - } - if len(parts) > 0 && numHasLeadingZero(parts[0]) { - p.panicItemf(it, "Invalid float %q: cannot have leading zeroes", it.val) - } - if !numPeriodsOK(it.val) { - // As a special case, numbers like '123.' or '1.e2', - // which are valid as far as Go/strconv are concerned, - // must be rejected because TOML says that a fractional - // part consists of '.' followed by 1+ digits. 
- p.panicItemf(it, "Invalid float %q: '.' must be followed by one or more digits", it.val) - } - val := strings.Replace(it.val, "_", "", -1) - if val == "+nan" || val == "-nan" { // Go doesn't support this, but TOML spec does. - val = "nan" - } - num, err := strconv.ParseFloat(val, 64) - if err != nil { - if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange { - p.panicErr(it, errParseRange{i: it.val, size: "float64"}) - } else { - p.panicItemf(it, "Invalid float value: %q", it.val) - } - } - return num, p.typeOfPrimitive(it) -} - -var dtTypes = []struct { - fmt string - zone *time.Location - next bool -}{ - {time.RFC3339Nano, time.Local, false}, - {"2006-01-02T15:04:05.999999999", internal.LocalDatetime, false}, - {"2006-01-02", internal.LocalDate, false}, - {"15:04:05.999999999", internal.LocalTime, false}, - - // tomlNext - {"2006-01-02T15:04Z07:00", time.Local, true}, - {"2006-01-02T15:04", internal.LocalDatetime, true}, - {"15:04", internal.LocalTime, true}, -} - -func (p *parser) valueDatetime(it item) (interface{}, tomlType) { - it.val = datetimeRepl.Replace(it.val) - var ( - t time.Time - ok bool - err error - ) - for _, dt := range dtTypes { - if dt.next && !p.tomlNext { - continue - } - t, err = time.ParseInLocation(dt.fmt, it.val, dt.zone) - if err == nil { - ok = true - break - } - } - if !ok { - p.panicItemf(it, "Invalid TOML Datetime: %q.", it.val) - } - return t, p.typeOfPrimitive(it) -} - -func (p *parser) valueArray(it item) (interface{}, tomlType) { - p.setType(p.currentKey, tomlArray, it.pos) - - var ( - types []tomlType - - // Initialize to a non-nil empty slice. This makes it consistent with - // how S = [] decodes into a non-nil slice inside something like struct - // { S []string }. See #338 - array = []interface{}{} - ) - for it = p.next(); it.typ != itemArrayEnd; it = p.next() { - if it.typ == itemCommentStart { - p.expect(itemText) - continue - } - - val, typ := p.value(it, true) - array = append(array, val) - types = append(types, typ) - - // XXX: types isn't used here, we need it to record the accurate type - // information. - // - // Not entirely sure how to best store this; could use "key[0]", - // "key[1]" notation, or maybe store it on the Array type? - _ = types - } - return array, tomlArray -} - -func (p *parser) valueInlineTable(it item, parentIsArray bool) (interface{}, tomlType) { - var ( - hash = make(map[string]interface{}) - outerContext = p.context - outerKey = p.currentKey - ) - - p.context = append(p.context, p.currentKey) - prevContext := p.context - p.currentKey = "" - - p.addImplicit(p.context) - p.addContext(p.context, parentIsArray) - - /// Loop over all table key/value pairs. - for it := p.next(); it.typ != itemInlineTableEnd; it = p.next() { - if it.typ == itemCommentStart { - p.expect(itemText) - continue - } - - /// Read all key parts. - k := p.nextPos() - var key Key - for ; k.typ != itemKeyEnd && k.typ != itemEOF; k = p.next() { - key = append(key, p.keyString(k)) - } - p.assertEqual(itemKeyEnd, k.typ) - - /// The current key is the last part. - p.currentKey = key[len(key)-1] - - /// All the other parts (if any) are the context; need to set each part - /// as implicit. - context := key[:len(key)-1] - for i := range context { - p.addImplicitContext(append(p.context, context[i:i+1]...)) - } - p.ordered = append(p.ordered, p.context.add(p.currentKey)) - - /// Set the value. - val, typ := p.value(p.next(), false) - p.set(p.currentKey, val, typ, it.pos) - hash[p.currentKey] = val - - /// Restore context. 
- p.context = prevContext - } - p.context = outerContext - p.currentKey = outerKey - return hash, tomlHash -} - -// numHasLeadingZero checks if this number has leading zeroes, allowing for '0', -// +/- signs, and base prefixes. -func numHasLeadingZero(s string) bool { - if len(s) > 1 && s[0] == '0' && !(s[1] == 'b' || s[1] == 'o' || s[1] == 'x') { // Allow 0b, 0o, 0x - return true - } - if len(s) > 2 && (s[0] == '-' || s[0] == '+') && s[1] == '0' { - return true - } - return false -} - -// numUnderscoresOK checks whether each underscore in s is surrounded by -// characters that are not underscores. -func numUnderscoresOK(s string) bool { - switch s { - case "nan", "+nan", "-nan", "inf", "-inf", "+inf": - return true - } - accept := false - for _, r := range s { - if r == '_' { - if !accept { - return false - } - } - - // isHexadecimal is a superset of all the permissable characters - // surrounding an underscore. - accept = isHexadecimal(r) - } - return accept -} - -// numPeriodsOK checks whether every period in s is followed by a digit. -func numPeriodsOK(s string) bool { - period := false - for _, r := range s { - if period && !isDigit(r) { - return false - } - period = r == '.' - } - return !period -} - -// Set the current context of the parser, where the context is either a hash or -// an array of hashes, depending on the value of the `array` parameter. -// -// Establishing the context also makes sure that the key isn't a duplicate, and -// will create implicit hashes automatically. -func (p *parser) addContext(key Key, array bool) { - var ok bool - - // Always start at the top level and drill down for our context. - hashContext := p.mapping - keyContext := make(Key, 0) - - // We only need implicit hashes for key[0:-1] - for _, k := range key[0 : len(key)-1] { - _, ok = hashContext[k] - keyContext = append(keyContext, k) - - // No key? Make an implicit hash and move on. - if !ok { - p.addImplicit(keyContext) - hashContext[k] = make(map[string]interface{}) - } - - // If the hash context is actually an array of tables, then set - // the hash context to the last element in that array. - // - // Otherwise, it better be a table, since this MUST be a key group (by - // virtue of it not being the last element in a key). - switch t := hashContext[k].(type) { - case []map[string]interface{}: - hashContext = t[len(t)-1] - case map[string]interface{}: - hashContext = t - default: - p.panicf("Key '%s' was already created as a hash.", keyContext) - } - } - - p.context = keyContext - if array { - // If this is the first element for this array, then allocate a new - // list of tables for it. - k := key[len(key)-1] - if _, ok := hashContext[k]; !ok { - hashContext[k] = make([]map[string]interface{}, 0, 4) - } - - // Add a new table. But make sure the key hasn't already been used - // for something else. - if hash, ok := hashContext[k].([]map[string]interface{}); ok { - hashContext[k] = append(hash, make(map[string]interface{})) - } else { - p.panicf("Key '%s' was already created and cannot be used as an array.", key) - } - } else { - p.setValue(key[len(key)-1], make(map[string]interface{})) - } - p.context = append(p.context, key[len(key)-1]) -} - -// set calls setValue and setType. -func (p *parser) set(key string, val interface{}, typ tomlType, pos Position) { - p.setValue(key, val) - p.setType(key, typ, pos) -} - -// setValue sets the given key to the given value in the current context. -// It will make sure that the key hasn't already been defined, account for -// implicit key groups. 
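The implicit/explicit bookkeeping above is what lets a header such as [a.b.c] create its parents silently while still rejecting genuine redefinitions. A sketch of the observable rule:

    package main

    import (
    	"fmt"

    	"github.com/BurntSushi/toml"
    )

    func main() {
    	var v map[string]interface{}
    	_, err := toml.Decode(`
    [a.b.c]   # implicitly creates the tables 'a' and 'a.b'
    x = 1

    [a]       # explicitly defining the implicit 'a' once is allowed
    y = 2
    `, &v)
    	fmt.Println(err) // <nil>; a second [a] would fail with "already been defined"
    }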
-func (p *parser) setValue(key string, value interface{}) { - var ( - tmpHash interface{} - ok bool - hash = p.mapping - keyContext Key - ) - for _, k := range p.context { - keyContext = append(keyContext, k) - if tmpHash, ok = hash[k]; !ok { - p.bug("Context for key '%s' has not been established.", keyContext) - } - switch t := tmpHash.(type) { - case []map[string]interface{}: - // The context is a table of hashes. Pick the most recent table - // defined as the current hash. - hash = t[len(t)-1] - case map[string]interface{}: - hash = t - default: - p.panicf("Key '%s' has already been defined.", keyContext) - } - } - keyContext = append(keyContext, key) - - if _, ok := hash[key]; ok { - // Normally redefining keys isn't allowed, but the key could have been - // defined implicitly and it's allowed to be redefined concretely. (See - // the `valid/implicit-and-explicit-after.toml` in toml-test) - // - // But we have to make sure to stop marking it as an implicit. (So that - // another redefinition provokes an error.) - // - // Note that since it has already been defined (as a hash), we don't - // want to overwrite it. So our business is done. - if p.isArray(keyContext) { - p.removeImplicit(keyContext) - hash[key] = value - return - } - if p.isImplicit(keyContext) { - p.removeImplicit(keyContext) - return - } - - // Otherwise, we have a concrete key trying to override a previous - // key, which is *always* wrong. - p.panicf("Key '%s' has already been defined.", keyContext) - } - - hash[key] = value -} - -// setType sets the type of a particular value at a given key. It should be -// called immediately AFTER setValue. -// -// Note that if `key` is empty, then the type given will be applied to the -// current context (which is either a table or an array of tables). -func (p *parser) setType(key string, typ tomlType, pos Position) { - keyContext := make(Key, 0, len(p.context)+1) - keyContext = append(keyContext, p.context...) - if len(key) > 0 { // allow type setting for hashes - keyContext = append(keyContext, key) - } - // Special case to make empty keys ("" = 1) work. - // Without it it will set "" rather than `""`. - // TODO: why is this needed? And why is this only needed here? - if len(keyContext) == 0 { - keyContext = Key{""} - } - p.keyInfo[keyContext.String()] = keyInfo{tomlType: typ, pos: pos} -} - -// Implicit keys need to be created when tables are implied in "a.b.c.d = 1" and -// "[a.b.c]" (the "a", "b", and "c" hashes are never created explicitly). -func (p *parser) addImplicit(key Key) { p.implicits[key.String()] = struct{}{} } -func (p *parser) removeImplicit(key Key) { delete(p.implicits, key.String()) } -func (p *parser) isImplicit(key Key) bool { _, ok := p.implicits[key.String()]; return ok } -func (p *parser) isArray(key Key) bool { return p.keyInfo[key.String()].tomlType == tomlArray } -func (p *parser) addImplicitContext(key Key) { p.addImplicit(key); p.addContext(key, false) } - -// current returns the full key name of the current context. -func (p *parser) current() string { - if len(p.currentKey) == 0 { - return p.context.String() - } - if len(p.context) == 0 { - return p.currentKey - } - return fmt.Sprintf("%s.%s", p.context, p.currentKey) -} - -func stripFirstNewline(s string) string { - if len(s) > 0 && s[0] == '\n' { - return s[1:] - } - if len(s) > 1 && s[0] == '\r' && s[1] == '\n' { - return s[2:] - } - return s -} - -// stripEscapedNewlines removes whitespace after line-ending backslashes in -// multiline strings. 
-// -// A line-ending backslash is an unescaped \ followed only by whitespace until -// the next newline. After a line-ending backslash, all whitespace is removed -// until the next non-whitespace character. -func (p *parser) stripEscapedNewlines(s string) string { - var b strings.Builder - var i int - for { - ix := strings.Index(s[i:], `\`) - if ix < 0 { - b.WriteString(s) - return b.String() - } - i += ix - - if len(s) > i+1 && s[i+1] == '\\' { - // Escaped backslash. - i += 2 - continue - } - // Scan until the next non-whitespace. - j := i + 1 - whitespaceLoop: - for ; j < len(s); j++ { - switch s[j] { - case ' ', '\t', '\r', '\n': - default: - break whitespaceLoop - } - } - if j == i+1 { - // Not a whitespace escape. - i++ - continue - } - if !strings.Contains(s[i:j], "\n") { - // This is not a line-ending backslash. - // (It's a bad escape sequence, but we can let - // replaceEscapes catch it.) - i++ - continue - } - b.WriteString(s[:i]) - s = s[j:] - i = 0 - } -} - -func (p *parser) replaceEscapes(it item, str string) string { - replaced := make([]rune, 0, len(str)) - s := []byte(str) - r := 0 - for r < len(s) { - if s[r] != '\\' { - c, size := utf8.DecodeRune(s[r:]) - r += size - replaced = append(replaced, c) - continue - } - r += 1 - if r >= len(s) { - p.bug("Escape sequence at end of string.") - return "" - } - switch s[r] { - default: - p.bug("Expected valid escape code after \\, but got %q.", s[r]) - case ' ', '\t': - p.panicItemf(it, "invalid escape: '\\%c'", s[r]) - case 'b': - replaced = append(replaced, rune(0x0008)) - r += 1 - case 't': - replaced = append(replaced, rune(0x0009)) - r += 1 - case 'n': - replaced = append(replaced, rune(0x000A)) - r += 1 - case 'f': - replaced = append(replaced, rune(0x000C)) - r += 1 - case 'r': - replaced = append(replaced, rune(0x000D)) - r += 1 - case 'e': - if p.tomlNext { - replaced = append(replaced, rune(0x001B)) - r += 1 - } - case '"': - replaced = append(replaced, rune(0x0022)) - r += 1 - case '\\': - replaced = append(replaced, rune(0x005C)) - r += 1 - case 'x': - if p.tomlNext { - escaped := p.asciiEscapeToUnicode(it, s[r+1:r+3]) - replaced = append(replaced, escaped) - r += 3 - } - case 'u': - // At this point, we know we have a Unicode escape of the form - // `uXXXX` at [r, r+5). (Because the lexer guarantees this - // for us.) - escaped := p.asciiEscapeToUnicode(it, s[r+1:r+5]) - replaced = append(replaced, escaped) - r += 5 - case 'U': - // At this point, we know we have a Unicode escape of the form - // `uXXXX` at [r, r+9). (Because the lexer guarantees this - // for us.) - escaped := p.asciiEscapeToUnicode(it, s[r+1:r+9]) - replaced = append(replaced, escaped) - r += 9 - } - } - return string(replaced) -} - -func (p *parser) asciiEscapeToUnicode(it item, bs []byte) rune { - s := string(bs) - hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32) - if err != nil { - p.bug("Could not parse '%s' as a hexadecimal number, but the lexer claims it's OK: %s", s, err) - } - if !utf8.ValidRune(rune(hex)) { - p.panicItemf(it, "Escaped character '\\u%s' is not valid UTF-8.", s) - } - return rune(hex) -} diff --git a/vendor/github.com/BurntSushi/toml/type_fields.go b/vendor/github.com/BurntSushi/toml/type_fields.go deleted file mode 100644 index 254ca82e54..0000000000 --- a/vendor/github.com/BurntSushi/toml/type_fields.go +++ /dev/null @@ -1,242 +0,0 @@ -package toml - -// Struct field handling is adapted from code in encoding/json: -// -// Copyright 2010 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the Go distribution. - -import ( - "reflect" - "sort" - "sync" -) - -// A field represents a single field found in a struct. -type field struct { - name string // the name of the field (`toml` tag included) - tag bool // whether field has a `toml` tag - index []int // represents the depth of an anonymous field - typ reflect.Type // the type of the field -} - -// byName sorts field by name, breaking ties with depth, -// then breaking ties with "name came from toml tag", then -// breaking ties with index sequence. -type byName []field - -func (x byName) Len() int { return len(x) } - -func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } - -func (x byName) Less(i, j int) bool { - if x[i].name != x[j].name { - return x[i].name < x[j].name - } - if len(x[i].index) != len(x[j].index) { - return len(x[i].index) < len(x[j].index) - } - if x[i].tag != x[j].tag { - return x[i].tag - } - return byIndex(x).Less(i, j) -} - -// byIndex sorts field by index sequence. -type byIndex []field - -func (x byIndex) Len() int { return len(x) } - -func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } - -func (x byIndex) Less(i, j int) bool { - for k, xik := range x[i].index { - if k >= len(x[j].index) { - return false - } - if xik != x[j].index[k] { - return xik < x[j].index[k] - } - } - return len(x[i].index) < len(x[j].index) -} - -// typeFields returns a list of fields that TOML should recognize for the given -// type. The algorithm is breadth-first search over the set of structs to -// include - the top struct and then any reachable anonymous structs. -func typeFields(t reflect.Type) []field { - // Anonymous fields to explore at the current level and the next. - current := []field{} - next := []field{{typ: t}} - - // Count of queued names for current level and the next. - var count map[reflect.Type]int - var nextCount map[reflect.Type]int - - // Types already visited at an earlier level. - visited := map[reflect.Type]bool{} - - // Fields found. - var fields []field - - for len(next) > 0 { - current, next = next, current[:0] - count, nextCount = nextCount, map[reflect.Type]int{} - - for _, f := range current { - if visited[f.typ] { - continue - } - visited[f.typ] = true - - // Scan f.typ for fields to include. - for i := 0; i < f.typ.NumField(); i++ { - sf := f.typ.Field(i) - if sf.PkgPath != "" && !sf.Anonymous { // unexported - continue - } - opts := getOptions(sf.Tag) - if opts.skip { - continue - } - index := make([]int, len(f.index)+1) - copy(index, f.index) - index[len(f.index)] = i - - ft := sf.Type - if ft.Name() == "" && ft.Kind() == reflect.Ptr { - // Follow pointer. - ft = ft.Elem() - } - - // Record found field and index sequence. - if opts.name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct { - tagged := opts.name != "" - name := opts.name - if name == "" { - name = sf.Name - } - fields = append(fields, field{name, tagged, index, ft}) - if count[f.typ] > 1 { - // If there were multiple instances, add a second, - // so that the annihilation code will see a duplicate. - // It only cares about the distinction between 1 or 2, - // so don't bother generating any more copies. - fields = append(fields, fields[len(fields)-1]) - } - continue - } - - // Record new anonymous struct to explore in next round. 
- nextCount[ft]++ - if nextCount[ft] == 1 { - f := field{name: ft.Name(), index: index, typ: ft} - next = append(next, f) - } - } - } - } - - sort.Sort(byName(fields)) - - // Delete all fields that are hidden by the Go rules for embedded fields, - // except that fields with TOML tags are promoted. - - // The fields are sorted in primary order of name, secondary order - // of field index length. Loop over names; for each name, delete - // hidden fields by choosing the one dominant field that survives. - out := fields[:0] - for advance, i := 0, 0; i < len(fields); i += advance { - // One iteration per name. - // Find the sequence of fields with the name of this first field. - fi := fields[i] - name := fi.name - for advance = 1; i+advance < len(fields); advance++ { - fj := fields[i+advance] - if fj.name != name { - break - } - } - if advance == 1 { // Only one field with this name - out = append(out, fi) - continue - } - dominant, ok := dominantField(fields[i : i+advance]) - if ok { - out = append(out, dominant) - } - } - - fields = out - sort.Sort(byIndex(fields)) - - return fields -} - -// dominantField looks through the fields, all of which are known to -// have the same name, to find the single field that dominates the -// others using Go's embedding rules, modified by the presence of -// TOML tags. If there are multiple top-level fields, the boolean -// will be false: This condition is an error in Go and we skip all -// the fields. -func dominantField(fields []field) (field, bool) { - // The fields are sorted in increasing index-length order. The winner - // must therefore be one with the shortest index length. Drop all - // longer entries, which is easy: just truncate the slice. - length := len(fields[0].index) - tagged := -1 // Index of first tagged field. - for i, f := range fields { - if len(f.index) > length { - fields = fields[:i] - break - } - if f.tag { - if tagged >= 0 { - // Multiple tagged fields at the same level: conflict. - // Return no field. - return field{}, false - } - tagged = i - } - } - if tagged >= 0 { - return fields[tagged], true - } - // All remaining fields have the same length. If there's more than one, - // we have a conflict (two fields named "X" at the same level) and we - // return no field. - if len(fields) > 1 { - return field{}, false - } - return fields[0], true -} - -var fieldCache struct { - sync.RWMutex - m map[reflect.Type][]field -} - -// cachedTypeFields is like typeFields but uses a cache to avoid repeated work. -func cachedTypeFields(t reflect.Type) []field { - fieldCache.RLock() - f := fieldCache.m[t] - fieldCache.RUnlock() - if f != nil { - return f - } - - // Compute fields without lock. - // Might duplicate effort but won't hold other computations back. - f = typeFields(t) - if f == nil { - f = []field{} - } - - fieldCache.Lock() - if fieldCache.m == nil { - fieldCache.m = map[reflect.Type][]field{} - } - fieldCache.m[t] = f - fieldCache.Unlock() - return f -} diff --git a/vendor/github.com/BurntSushi/toml/type_toml.go b/vendor/github.com/BurntSushi/toml/type_toml.go deleted file mode 100644 index 4e90d77373..0000000000 --- a/vendor/github.com/BurntSushi/toml/type_toml.go +++ /dev/null @@ -1,70 +0,0 @@ -package toml - -// tomlType represents any Go type that corresponds to a TOML type. -// While the first draft of the TOML spec has a simplistic type system that -// probably doesn't need this level of sophistication, we seem to be militating -// toward adding real composite types. 
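The field-resolution rules in type_fields.go above mirror encoding/json: a shallower or explicitly tagged field dominates, and ambiguous same-depth fields are dropped. A small sketch of the observable behavior via the public API (illustrative, not part of the diff):

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

type Base struct {
	Name string `toml:"name"`
}

type Doc struct {
	Base        // embedded: its Name sits at depth 1
	Name string `toml:"name"` // depth 0: dominates Base.Name
}

func main() {
	var d Doc
	if _, err := toml.Decode(`name = "outer"`, &d); err != nil {
		panic(err)
	}
	fmt.Printf("Doc.Name=%q Base.Name=%q\n", d.Name, d.Base.Name) // "outer" ""
}
```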
-type tomlType interface { - typeString() string -} - -// typeEqual accepts any two types and returns true if they are equal. -func typeEqual(t1, t2 tomlType) bool { - if t1 == nil || t2 == nil { - return false - } - return t1.typeString() == t2.typeString() -} - -func typeIsTable(t tomlType) bool { - return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash) -} - -type tomlBaseType string - -func (btype tomlBaseType) typeString() string { - return string(btype) -} - -func (btype tomlBaseType) String() string { - return btype.typeString() -} - -var ( - tomlInteger tomlBaseType = "Integer" - tomlFloat tomlBaseType = "Float" - tomlDatetime tomlBaseType = "Datetime" - tomlString tomlBaseType = "String" - tomlBool tomlBaseType = "Bool" - tomlArray tomlBaseType = "Array" - tomlHash tomlBaseType = "Hash" - tomlArrayHash tomlBaseType = "ArrayHash" -) - -// typeOfPrimitive returns a tomlType of any primitive value in TOML. -// Primitive values are: Integer, Float, Datetime, String and Bool. -// -// Passing a lexer item other than the following will cause a BUG message -// to occur: itemString, itemBool, itemInteger, itemFloat, itemDatetime. -func (p *parser) typeOfPrimitive(lexItem item) tomlType { - switch lexItem.typ { - case itemInteger: - return tomlInteger - case itemFloat: - return tomlFloat - case itemDatetime: - return tomlDatetime - case itemString: - return tomlString - case itemMultilineString: - return tomlString - case itemRawString: - return tomlString - case itemRawMultilineString: - return tomlString - case itemBool: - return tomlBool - } - p.bug("Cannot infer primitive type of lex item '%s'.", lexItem) - panic("unreachable") -} diff --git a/vendor/github.com/gomarkdown/markdown/LICENSE.txt b/vendor/github.com/gomarkdown/markdown/LICENSE.txt deleted file mode 100644 index 6880461027..0000000000 --- a/vendor/github.com/gomarkdown/markdown/LICENSE.txt +++ /dev/null @@ -1,31 +0,0 @@ -Markdown is distributed under the Simplified BSD License: - -Copyright © 2011 Russ Ross -Copyright © 2018 Krzysztof Kowalczyk -Copyright © 2018 Authors -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions -are met: - -1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following - disclaimer in the documentation and/or other materials provided with - the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS -FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE -COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, -BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN -ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -POSSIBILITY OF SUCH DAMAGE. 
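The gomarkdown files removed below were pulled in only as transitive dependencies of mdtoc (see the go.mod changes). For context, the package's top-level entry point is a one-liner; a sketch of typical use:

```go
package main

import (
	"fmt"

	"github.com/gomarkdown/markdown"
)

func main() {
	md := []byte("# Title\n\nSome *markdown* text.\n")
	// nil parser and renderer select the package defaults.
	fmt.Println(string(markdown.ToHTML(md, nil, nil)))
}
```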
diff --git a/vendor/github.com/gomarkdown/markdown/ast/doc.go b/vendor/github.com/gomarkdown/markdown/ast/doc.go deleted file mode 100644 index 376dc67cc2..0000000000 --- a/vendor/github.com/gomarkdown/markdown/ast/doc.go +++ /dev/null @@ -1,4 +0,0 @@ -/* -Package ast defines tree representation of a parsed markdown document. -*/ -package ast diff --git a/vendor/github.com/gomarkdown/markdown/ast/node.go b/vendor/github.com/gomarkdown/markdown/ast/node.go deleted file mode 100644 index 8f802db82b..0000000000 --- a/vendor/github.com/gomarkdown/markdown/ast/node.go +++ /dev/null @@ -1,581 +0,0 @@ -package ast - -// An attribute can be attached to block elements. They are specified as -// {#id .classs key="value"} where quotes for values are mandatory, multiple -// key/value pairs are separated by whitespace. -type Attribute struct { - ID []byte - Classes [][]byte - Attrs map[string][]byte -} - -// ListType contains bitwise or'ed flags for list and list item objects. -type ListType int - -// These are the possible flag values for the ListItem renderer. -// Multiple flag values may be ORed together. -// These are mostly of interest if you are writing a new output format. -const ( - ListTypeOrdered ListType = 1 << iota - ListTypeDefinition - ListTypeTerm - - ListItemContainsBlock - ListItemBeginningOfList // TODO: figure out if this is of any use now - ListItemEndOfList -) - -// CellAlignFlags holds a type of alignment in a table cell. -type CellAlignFlags int - -// These are the possible flag values for the table cell renderer. -// Only a single one of these values will be used; they are not ORed together. -// These are mostly of interest if you are writing a new output format. -const ( - TableAlignmentLeft CellAlignFlags = 1 << iota - TableAlignmentRight - TableAlignmentCenter = (TableAlignmentLeft | TableAlignmentRight) -) - -func (a CellAlignFlags) String() string { - switch a { - case TableAlignmentLeft: - return "left" - case TableAlignmentRight: - return "right" - case TableAlignmentCenter: - return "center" - default: - return "" - } -} - -// DocumentMatters holds the type of a {front,main,back}matter in the document -type DocumentMatters int - -// These are all possible Document divisions. 
-const ( - DocumentMatterNone DocumentMatters = iota - DocumentMatterFront - DocumentMatterMain - DocumentMatterBack -) - -// CitationTypes holds the type of a citation, informative, normative or suppressed -type CitationTypes int - -const ( - CitationTypeNone CitationTypes = iota - CitationTypeSuppressed - CitationTypeInformative - CitationTypeNormative -) - -// Node defines an ast node -type Node interface { - AsContainer() *Container - AsLeaf() *Leaf - GetParent() Node - SetParent(newParent Node) - GetChildren() []Node - SetChildren(newChildren []Node) -} - -// Container is a type of node that can contain children -type Container struct { - Parent Node - Children []Node - - Literal []byte // Text contents of the leaf nodes - Content []byte // Markdown content of the block nodes - - *Attribute // Block level attribute -} - -// return true if can contain children of a given node type -// used by custom nodes to over-ride logic in canNodeContain -type CanContain interface { - CanContain(Node) bool -} - -// AsContainer returns itself as *Container -func (c *Container) AsContainer() *Container { - return c -} - -// AsLeaf returns nil -func (c *Container) AsLeaf() *Leaf { - return nil -} - -// GetParent returns parent node -func (c *Container) GetParent() Node { - return c.Parent -} - -// SetParent sets the parent node -func (c *Container) SetParent(newParent Node) { - c.Parent = newParent -} - -// GetChildren returns children nodes -func (c *Container) GetChildren() []Node { - return c.Children -} - -// SetChildren sets children node -func (c *Container) SetChildren(newChildren []Node) { - c.Children = newChildren -} - -// Leaf is a type of node that cannot have children -type Leaf struct { - Parent Node - - Literal []byte // Text contents of the leaf nodes - Content []byte // Markdown content of the block nodes - - *Attribute // Block level attribute -} - -// AsContainer returns nil -func (l *Leaf) AsContainer() *Container { - return nil -} - -// AsLeaf returns itself as *Leaf -func (l *Leaf) AsLeaf() *Leaf { - return l -} - -// GetParent returns parent node -func (l *Leaf) GetParent() Node { - return l.Parent -} - -// SetParent sets the parent nodd -func (l *Leaf) SetParent(newParent Node) { - l.Parent = newParent -} - -// GetChildren returns nil because Leaf cannot have children -func (l *Leaf) GetChildren() []Node { - return nil -} - -// SetChildren will panic if trying to set non-empty children -// because Leaf cannot have children -func (l *Leaf) SetChildren(newChildren []Node) { - if len(newChildren) != 0 { - panic("leaf node cannot have children") - } - -} - -// Document represents markdown document node, a root of ast -type Document struct { - Container -} - -// DocumentMatter represents markdown node that signals a document -// division: frontmatter, mainmatter or backmatter. -type DocumentMatter struct { - Container - - Matter DocumentMatters -} - -// BlockQuote represents markdown block quote node -type BlockQuote struct { - Container -} - -// Aside represents an markdown aside node. -type Aside struct { - Container -} - -// List represents markdown list node -type List struct { - Container - - ListFlags ListType - Tight bool // Skip
<p>
s around list item data if true - BulletChar byte // '*', '+' or '-' in bullet lists - Delimiter byte // '.' or ')' after the number in ordered lists - Start int // for ordered lists this indicates the starting number if > 0 - RefLink []byte // If not nil, turns this list item into a footnote item and triggers different rendering - IsFootnotesList bool // This is a list of footnotes -} - -// ListItem represents markdown list item node -type ListItem struct { - Container - - ListFlags ListType - Tight bool // Skip
<p>
s around list item data if true - BulletChar byte // '*', '+' or '-' in bullet lists - Delimiter byte // '.' or ')' after the number in ordered lists - RefLink []byte // If not nil, turns this list item into a footnote item and triggers different rendering - IsFootnotesList bool // This is a list of footnotes -} - -// Paragraph represents markdown paragraph node -type Paragraph struct { - Container -} - -// Math represents markdown MathAjax inline node -type Math struct { - Leaf -} - -// MathBlock represents markdown MathAjax block node -type MathBlock struct { - Container -} - -// Heading represents markdown heading node -type Heading struct { - Container - - Level int // This holds the heading level number - HeadingID string // This might hold heading ID, if present - IsTitleblock bool // Specifies whether it's a title block - IsSpecial bool // We are a special heading (starts with .#) -} - -// HorizontalRule represents markdown horizontal rule node -type HorizontalRule struct { - Leaf -} - -// Emph represents markdown emphasis node -type Emph struct { - Container -} - -// Strong represents markdown strong node -type Strong struct { - Container -} - -// Del represents markdown del node -type Del struct { - Container -} - -// Link represents markdown link node -type Link struct { - Container - - Destination []byte // Destination is what goes into a href - Title []byte // Title is the tooltip thing that goes in a title attribute - NoteID int // NoteID contains a serial number of a footnote, zero if it's not a footnote - Footnote Node // If it's a footnote, this is a direct link to the footnote Node. Otherwise nil. - DeferredID []byte // If a deferred link this holds the original ID. - AdditionalAttributes []string // Defines additional attributes to use during rendering. -} - -// CrossReference is a reference node. -type CrossReference struct { - Container - - Destination []byte // Destination is where the reference points to - Suffix []byte // Potential citation suffix, i.e. (#myid, text) -} - -// Citation is a citation node. -type Citation struct { - Leaf - - Destination [][]byte // Destination is where the citation points to. Multiple ones are allowed. - Type []CitationTypes // 1:1 mapping of destination and citation type - Suffix [][]byte // Potential citation suffix, i.e. [@!RFC1035, p. 
144] -} - -// Image represents markdown image node -type Image struct { - Container - - Destination []byte // Destination is what goes into a href - Title []byte // Title is the tooltip thing that goes in a title attribute -} - -// Text represents markdown text node -type Text struct { - Leaf -} - -// HTMLBlock represents markdown html node -type HTMLBlock struct { - Leaf -} - -// CodeBlock represents markdown code block node -type CodeBlock struct { - Leaf - - IsFenced bool // Specifies whether it's a fenced code block or an indented one - Info []byte // This holds the info string - FenceChar byte - FenceLength int - FenceOffset int -} - -// Softbreak represents markdown softbreak node -// Note: not used currently -type Softbreak struct { - Leaf -} - -// Hardbreak represents markdown hard break node -type Hardbreak struct { - Leaf -} - -// NonBlockingSpace represents markdown non-blocking space node -type NonBlockingSpace struct { - Leaf -} - -// Code represents markdown code node -type Code struct { - Leaf -} - -// HTMLSpan represents markdown html span node -type HTMLSpan struct { - Leaf -} - -// Table represents markdown table node -type Table struct { - Container -} - -// TableCell represents markdown table cell node -type TableCell struct { - Container - - IsHeader bool // This tells if it's under the header row - Align CellAlignFlags // This holds the value for align attribute - ColSpan int // How many columns to span -} - -// TableHeader represents markdown table head node -type TableHeader struct { - Container -} - -// TableBody represents markdown table body node -type TableBody struct { - Container -} - -// TableRow represents markdown table row node -type TableRow struct { - Container -} - -// TableFooter represents markdown table foot node -type TableFooter struct { - Container -} - -// Caption represents a figure, code or quote caption -type Caption struct { - Container -} - -// CaptionFigure is a node (blockquote or codeblock) that has a caption -type CaptionFigure struct { - Container - - HeadingID string // This might hold heading ID, if present -} - -// Callout is a node that can exist both in text (where it is an actual node) and in a code block. -type Callout struct { - Leaf - - ID []byte // number of this callout -} - -// Index is a node that contains an Index item and an optional, subitem. -type Index struct { - Leaf - - Primary bool - Item []byte - Subitem []byte - ID string // ID of the index -} - -// Subscript is a subscript node -type Subscript struct { - Leaf -} - -// Subscript is a superscript node -type Superscript struct { - Leaf -} - -// Footnotes is a node that contains all footnotes -type Footnotes struct { - Container -} - -func removeNodeFromArray(a []Node, node Node) []Node { - n := len(a) - for i := 0; i < n; i++ { - if a[i] == node { - return append(a[:i], a[i+1:]...) - } - } - return nil -} - -// AppendChild appends child to children of parent -// It panics if either node is nil. 
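A small sketch of the tree-manipulation helpers described above (AppendChild, GetFirstChild), assuming the upstream gomarkdown/markdown/ast package removed by this change:

```go
package main

import (
	"fmt"

	"github.com/gomarkdown/markdown/ast"
)

func main() {
	doc := &ast.Document{}
	para := &ast.Paragraph{}
	txt := &ast.Text{Leaf: ast.Leaf{Literal: []byte("hello")}}

	ast.AppendChild(doc, para) // re-parents para under doc
	ast.AppendChild(para, txt)

	first := ast.GetFirstChild(para)
	fmt.Println(string(first.AsLeaf().Literal)) // hello
}
```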
-func AppendChild(parent Node, child Node) { - RemoveFromTree(child) - child.SetParent(parent) - newChildren := append(parent.GetChildren(), child) - parent.SetChildren(newChildren) -} - -// RemoveFromTree removes this node from tree -func RemoveFromTree(n Node) { - if n.GetParent() == nil { - return - } - // important: don't clear n.Children if n has no parent - // we're called from AppendChild and that might happen on a node - // that accumulated Children but hasn't been inserted into the tree - n.SetChildren(nil) - p := n.GetParent() - newChildren := removeNodeFromArray(p.GetChildren(), n) - if newChildren != nil { - p.SetChildren(newChildren) - } -} - -// GetLastChild returns last child of node n -// It's implemented as stand-alone function to keep Node interface small -func GetLastChild(n Node) Node { - a := n.GetChildren() - if len(a) > 0 { - return a[len(a)-1] - } - return nil -} - -// GetFirstChild returns first child of node n -// It's implemented as stand-alone function to keep Node interface small -func GetFirstChild(n Node) Node { - a := n.GetChildren() - if len(a) > 0 { - return a[0] - } - return nil -} - -// GetNextNode returns next sibling of node n (node after n) -// We can't make it part of Container or Leaf because we loose Node identity -func GetNextNode(n Node) Node { - parent := n.GetParent() - if parent == nil { - return nil - } - a := parent.GetChildren() - len := len(a) - 1 - for i := 0; i < len; i++ { - if a[i] == n { - return a[i+1] - } - } - return nil -} - -// GetPrevNode returns previous sibling of node n (node before n) -// We can't make it part of Container or Leaf because we loose Node identity -func GetPrevNode(n Node) Node { - parent := n.GetParent() - if parent == nil { - return nil - } - a := parent.GetChildren() - len := len(a) - for i := 1; i < len; i++ { - if a[i] == n { - return a[i-1] - } - } - return nil -} - -// WalkStatus allows NodeVisitor to have some control over the tree traversal. -// It is returned from NodeVisitor and different values allow Node.Walk to -// decide which node to go to next. -type WalkStatus int - -const ( - // GoToNext is the default traversal of every node. - GoToNext WalkStatus = iota - // SkipChildren tells walker to skip all children of current node. - SkipChildren - // Terminate tells walker to terminate the traversal. - Terminate -) - -// NodeVisitor is a callback to be called when traversing the syntax tree. -// Called twice for every node: once with entering=true when the branch is -// first visited, then with entering=false after all the children are done. 
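And the visitor API in action, counting headings with WalkFunc; the parser import comes from the same removed module (illustrative sketch, not part of the diff):

```go
package main

import (
	"fmt"

	"github.com/gomarkdown/markdown/ast"
	"github.com/gomarkdown/markdown/parser"
)

func main() {
	doc := parser.New().Parse([]byte("# One\n\ntext\n\n## Two\n"))

	headings := 0
	ast.WalkFunc(doc, func(n ast.Node, entering bool) ast.WalkStatus {
		if _, ok := n.(*ast.Heading); ok && entering {
			headings++
		}
		return ast.GoToNext
	})
	fmt.Println("headings:", headings) // 2
}
```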
-type NodeVisitor interface { - Visit(node Node, entering bool) WalkStatus -} - -// NodeVisitorFunc casts a function to match NodeVisitor interface -type NodeVisitorFunc func(node Node, entering bool) WalkStatus - -// Walk traverses tree recursively -func Walk(n Node, visitor NodeVisitor) WalkStatus { - isContainer := n.AsContainer() != nil - status := visitor.Visit(n, true) // entering - if status == Terminate { - // even if terminating, close container node - if isContainer { - visitor.Visit(n, false) - } - return status - } - if isContainer && status != SkipChildren { - children := n.GetChildren() - for _, n := range children { - status = Walk(n, visitor) - if status == Terminate { - return status - } - } - } - if isContainer { - status = visitor.Visit(n, false) // exiting - if status == Terminate { - return status - } - } - return GoToNext -} - -// Visit calls visitor function -func (f NodeVisitorFunc) Visit(node Node, entering bool) WalkStatus { - return f(node, entering) -} - -// WalkFunc is like Walk but accepts just a callback function -func WalkFunc(n Node, f NodeVisitorFunc) { - visitor := NodeVisitorFunc(f) - Walk(n, visitor) -} diff --git a/vendor/github.com/gomarkdown/markdown/ast/print.go b/vendor/github.com/gomarkdown/markdown/ast/print.go deleted file mode 100644 index a4e3d62467..0000000000 --- a/vendor/github.com/gomarkdown/markdown/ast/print.go +++ /dev/null @@ -1,168 +0,0 @@ -package ast - -import ( - "bytes" - "fmt" - "io" - "strings" - "unicode/utf8" -) - -// Print is for debugging. It prints a string representation of parsed -// markdown doc (result of parser.Parse()) to dst. -// -// To make output readable, it shortens text output. -func Print(dst io.Writer, doc Node) { - PrintWithPrefix(dst, doc, " ") -} - -// PrintWithPrefix is like Print but allows customizing prefix used for -// indentation. By default it's 2 spaces. You can change it to e.g. 
tab -// by passing "\t" -func PrintWithPrefix(w io.Writer, doc Node, prefix string) { - // for more compact output, don't print outer Document - if _, ok := doc.(*Document); ok { - for _, c := range doc.GetChildren() { - printRecur(w, c, prefix, 0) - } - } else { - printRecur(w, doc, prefix, 0) - } -} - -// ToString is like Dump but returns result as a string -func ToString(doc Node) string { - var buf bytes.Buffer - Print(&buf, doc) - return buf.String() -} - -func contentToString(d1 []byte, d2 []byte) string { - if d1 != nil { - return string(d1) - } - if d2 != nil { - return string(d2) - } - return "" -} - -func getContent(node Node) string { - if c := node.AsContainer(); c != nil { - return contentToString(c.Literal, c.Content) - } - leaf := node.AsLeaf() - return contentToString(leaf.Literal, leaf.Content) -} - -func shortenString(s string, maxLen int) string { - // for cleaner, one-line ouput, replace some white-space chars - // with their escaped version - s = strings.Replace(s, "\n", `\n`, -1) - s = strings.Replace(s, "\r", `\r`, -1) - s = strings.Replace(s, "\t", `\t`, -1) - if maxLen < 0 { - return s - } - if utf8.RuneCountInString(s) < maxLen { - return s - } - // add "…" to indicate truncation - return string(append([]rune(s)[:maxLen-3], '…')) -} - -// get a short name of the type of v which excludes package name -// and strips "()" from the end -func getNodeType(node Node) string { - s := fmt.Sprintf("%T", node) - s = strings.TrimSuffix(s, "()") - if idx := strings.Index(s, "."); idx != -1 { - return s[idx+1:] - } - return s -} - -func printDefault(w io.Writer, indent string, typeName string, content string) { - content = strings.TrimSpace(content) - if len(content) > 0 { - fmt.Fprintf(w, "%s%s '%s'\n", indent, typeName, content) - } else { - fmt.Fprintf(w, "%s%s\n", indent, typeName) - } -} - -func getListFlags(f ListType) string { - var s string - if f&ListTypeOrdered != 0 { - s += "ordered " - } - if f&ListTypeDefinition != 0 { - s += "definition " - } - if f&ListTypeTerm != 0 { - s += "term " - } - if f&ListItemContainsBlock != 0 { - s += "has_block " - } - if f&ListItemBeginningOfList != 0 { - s += "start " - } - if f&ListItemEndOfList != 0 { - s += "end " - } - s = strings.TrimSpace(s) - return s -} - -func printRecur(w io.Writer, node Node, prefix string, depth int) { - if node == nil { - return - } - indent := strings.Repeat(prefix, depth) - - content := shortenString(getContent(node), 40) - typeName := getNodeType(node) - switch v := node.(type) { - case *Link: - content := "url=" + string(v.Destination) - printDefault(w, indent, typeName, content) - case *Image: - content := "url=" + string(v.Destination) - printDefault(w, indent, typeName, content) - case *List: - if v.Start > 1 { - content += fmt.Sprintf("start=%d ", v.Start) - } - if v.Tight { - content += "tight " - } - if v.IsFootnotesList { - content += "footnotes " - } - flags := getListFlags(v.ListFlags) - if len(flags) > 0 { - content += "flags=" + flags + " " - } - printDefault(w, indent, typeName, content) - case *ListItem: - if v.Tight { - content += "tight " - } - if v.IsFootnotesList { - content += "footnotes " - } - flags := getListFlags(v.ListFlags) - if len(flags) > 0 { - content += "flags=" + flags + " " - } - printDefault(w, indent, typeName, content) - case *CodeBlock: - printDefault(w, indent, typeName + ":" + string(v.Info), content) - default: - printDefault(w, indent, typeName, content) - } - for _, child := range node.GetChildren() { - printRecur(w, child, prefix, depth+1) - } -} diff --git 
a/vendor/github.com/gomarkdown/markdown/html/doc.go b/vendor/github.com/gomarkdown/markdown/html/doc.go deleted file mode 100644 index 34bbbe23e4..0000000000 --- a/vendor/github.com/gomarkdown/markdown/html/doc.go +++ /dev/null @@ -1,43 +0,0 @@ -/* -Package html implements HTML renderer of parsed markdown document. - -Configuring and customizing a renderer - -A renderer can be configured with multiple options: - - import "github.com/gomarkdown/markdown/html" - - flags := html.CommonFlags | html.CompletePage | html.HrefTargetBlank - opts := html.RendererOptions{ - Title: "A custom title", - Flags: flags, - } - renderer := html.NewRenderer(opts) - -You can also re-use most of the logic and customize rendering of selected nodes -by providing node render hook. -This is most useful for rendering nodes that allow for design choices, like -links or code blocks. - - import ( - "github.com/gomarkdown/markdown/html" - "github.com/gomarkdown/markdown/ast" - ) - - // a very dummy render hook that will output "code_replacements" instead of - // ${content} emitted by html.Renderer - func renderHookCodeBlock(w io.Writer, node ast.Node, entering bool) (ast.WalkStatus, bool) { - _, ok := node.(*ast.CodeBlock) - if !ok { - return ast.GoToNext, false - } - io.WriteString(w, "code_replacement") - return ast.GoToNext, true - } - - opts := html.RendererOptions{ - RenderNodeHook: renderHookCodeBlock, - } - renderer := html.NewRenderer(opts) -*/ -package html diff --git a/vendor/github.com/gomarkdown/markdown/html/renderer.go b/vendor/github.com/gomarkdown/markdown/html/renderer.go deleted file mode 100644 index 494e7540b4..0000000000 --- a/vendor/github.com/gomarkdown/markdown/html/renderer.go +++ /dev/null @@ -1,1339 +0,0 @@ -package html - -import ( - "bytes" - "fmt" - "html" - "io" - "regexp" - "sort" - "strconv" - "strings" - - "github.com/gomarkdown/markdown/ast" - "github.com/gomarkdown/markdown/parser" -) - -// Flags control optional behavior of HTML renderer. -type Flags int - -// IDTag is the tag used for tag identification, it defaults to "id", some renderers -// may wish to override this and use e.g. "anchor". -var IDTag = "id" - -// HTML renderer configuration options. -const ( - FlagsNone Flags = 0 - SkipHTML Flags = 1 << iota // Skip preformatted HTML blocks - SkipImages // Skip embedded images - SkipLinks // Skip all links - Safelink // Only link to trusted protocols - NofollowLinks // Only link with rel="nofollow" - NoreferrerLinks // Only link with rel="noreferrer" - NoopenerLinks // Only link with rel="noopener" - HrefTargetBlank // Add a blank target - CompletePage // Generate a complete HTML page - UseXHTML // Generate XHTML output instead of HTML - FootnoteReturnLinks // Generate a link at the end of a footnote to return to the source - FootnoteNoHRTag // Do not output an HR after starting a footnote list. 
- Smartypants // Enable smart punctuation substitutions - SmartypantsFractions // Enable smart fractions (with Smartypants) - SmartypantsDashes // Enable smart dashes (with Smartypants) - SmartypantsLatexDashes // Enable LaTeX-style dashes (with Smartypants) - SmartypantsAngledQuotes // Enable angled double quotes (with Smartypants) for double quotes rendering - SmartypantsQuotesNBSP // Enable « French guillemets » (with Smartypants) - TOC // Generate a table of contents - LazyLoadImages // Include loading="lazy" with images - - CommonFlags Flags = Smartypants | SmartypantsFractions | SmartypantsDashes | SmartypantsLatexDashes -) - -var ( - htmlTagRe = regexp.MustCompile("(?i)^" + htmlTag) -) - -const ( - htmlTag = "(?:" + openTag + "|" + closeTag + "|" + htmlComment + "|" + - processingInstruction + "|" + declaration + "|" + cdata + ")" - closeTag = "]" - openTag = "<" + tagName + attribute + "*" + "\\s*/?>" - attribute = "(?:" + "\\s+" + attributeName + attributeValueSpec + "?)" - attributeValue = "(?:" + unquotedValue + "|" + singleQuotedValue + "|" + doubleQuotedValue + ")" - attributeValueSpec = "(?:" + "\\s*=" + "\\s*" + attributeValue + ")" - attributeName = "[a-zA-Z_:][a-zA-Z0-9:._-]*" - cdata = "" - declaration = "]*>" - doubleQuotedValue = "\"[^\"]*\"" - htmlComment = "|" - processingInstruction = "[<][?].*?[?][>]" - singleQuotedValue = "'[^']*'" - tagName = "[A-Za-z][A-Za-z0-9-]*" - unquotedValue = "[^\"'=<>`\\x00-\\x20]+" -) - -// RenderNodeFunc allows reusing most of Renderer logic and replacing -// rendering of some nodes. If it returns false, Renderer.RenderNode -// will execute its logic. If it returns true, Renderer.RenderNode will -// skip rendering this node and will return WalkStatus -type RenderNodeFunc func(w io.Writer, node ast.Node, entering bool) (ast.WalkStatus, bool) - -// RendererOptions is a collection of supplementary parameters tweaking -// the behavior of various parts of HTML renderer. -type RendererOptions struct { - // Prepend this text to each relative URL. - AbsolutePrefix string - // Add this text to each footnote anchor, to ensure uniqueness. - FootnoteAnchorPrefix string - // Show this text inside the tag for a footnote return link, if the - // FootnoteReturnLinks flag is enabled. If blank, the string - // [return] is used. - FootnoteReturnLinkContents string - // CitationFormatString defines how a citation is rendered. If blank, the string - // [%s] is used. Where %s will be substituted with the citation target. - CitationFormatString string - // If set, add this text to the front of each Heading ID, to ensure uniqueness. - HeadingIDPrefix string - // If set, add this text to the back of each Heading ID, to ensure uniqueness. - HeadingIDSuffix string - // can over-write
<p>
for paragraph tag - ParagraphTag string - - Title string // Document title (used if CompletePage is set) - CSS string // Optional CSS file URL (used if CompletePage is set) - Icon string // Optional icon file URL (used if CompletePage is set) - Head []byte // Optional head data injected in the section (used if CompletePage is set) - - Flags Flags // Flags allow customizing this renderer's behavior - - // if set, called at the start of RenderNode(). Allows replacing - // rendering of some nodes - RenderNodeHook RenderNodeFunc - - // Comments is a list of comments the renderer should detect when - // parsing code blocks and detecting callouts. - Comments [][]byte - - // Generator is a meta tag that is inserted in the generated HTML so show what rendered it. It should not include the closing tag. - // Defaults (note content quote is not closed) to ` " or ">" - - // Track heading IDs to prevent ID collision in a single generation. - headingIDs map[string]int - - lastOutputLen int - - // if > 0, will strip html tags in Out and Outs - DisableTags int - - // IsSafeURLOverride allows overriding the default URL matcher. URL is - // safe if the overriding function returns true. Can be used to extend - // the default list of safe URLs. - IsSafeURLOverride func(url []byte) bool - - sr *SPRenderer - - documentMatter ast.DocumentMatters // keep track of front/main/back matter. -} - -// Escaper defines how to escape HTML special characters -var Escaper = [256][]byte{ - '&': []byte("&"), - '<': []byte("<"), - '>': []byte(">"), - '"': []byte("""), -} - -// EscapeHTML writes html-escaped d to w. It escapes &, <, > and " characters. -func EscapeHTML(w io.Writer, d []byte) { - var start, end int - n := len(d) - for end < n { - escSeq := Escaper[d[end]] - if escSeq != nil { - w.Write(d[start:end]) - w.Write(escSeq) - start = end + 1 - } - end++ - } - if start < n && end <= n { - w.Write(d[start:end]) - } -} - -func EscLink(w io.Writer, text []byte) { - unesc := html.UnescapeString(string(text)) - EscapeHTML(w, []byte(unesc)) -} - -// Escape writes the text to w, but skips the escape character. -func Escape(w io.Writer, text []byte) { - esc := false - for i := 0; i < len(text); i++ { - if text[i] == '\\' { - esc = !esc - } - if esc && text[i] == '\\' { - continue - } - w.Write([]byte{text[i]}) - } -} - -// NewRenderer creates and configures an Renderer object, which -// satisfies the Renderer interface. -func NewRenderer(opts RendererOptions) *Renderer { - // configure the rendering engine - closeTag := ">" - if opts.Flags&UseXHTML != 0 { - closeTag = " />" - } - - if opts.FootnoteReturnLinkContents == "" { - opts.FootnoteReturnLinkContents = `[return]` - } - if opts.CitationFormatString == "" { - opts.CitationFormatString = `[%s]` - } - if opts.Generator == "" { - opts.Generator = ` 0 { - s += " " + strings.Join(attrs, " ") - } - io.WriteString(w, s+">") - r.lastOutputLen = 1 -} - -func FootnoteRef(prefix string, node *ast.Link) string { - urlFrag := prefix + string(Slugify(node.Destination)) - nStr := strconv.Itoa(node.NoteID) - anchor := `` + nStr + `` - return `` + anchor + `` -} - -func FootnoteItem(prefix string, slug []byte) string { - return `
<li id="fn:` + prefix + string(slug) + `">
  • ` -} - -func FootnoteReturnLink(prefix, returnLink string, slug []byte) string { - return ` ` + returnLink + `` -} - -func ListItemOpenCR(listItem *ast.ListItem) bool { - if ast.GetPrevNode(listItem) == nil { - return false - } - ld := listItem.Parent.(*ast.List) - return !ld.Tight && ld.ListFlags&ast.ListTypeDefinition == 0 -} - -func SkipParagraphTags(para *ast.Paragraph) bool { - parent := para.Parent - grandparent := parent.GetParent() - if grandparent == nil || !IsList(grandparent) { - return false - } - isParentTerm := IsListItemTerm(parent) - grandparentListData := grandparent.(*ast.List) - tightOrTerm := grandparentListData.Tight || isParentTerm - return tightOrTerm -} - -// Out is a helper to write data to writer -func (r *Renderer) Out(w io.Writer, d []byte) { - r.lastOutputLen = len(d) - if r.DisableTags > 0 { - d = htmlTagRe.ReplaceAll(d, []byte{}) - } - w.Write(d) -} - -// Outs is a helper to write data to writer -func (r *Renderer) Outs(w io.Writer, s string) { - r.lastOutputLen = len(s) - if r.DisableTags > 0 { - s = htmlTagRe.ReplaceAllString(s, "") - } - io.WriteString(w, s) -} - -// CR writes a new line -func (r *Renderer) CR(w io.Writer) { - if r.lastOutputLen > 0 { - r.Outs(w, "\n") - } -} - -var ( - openHTags = []string{"", "", "", "", ""} -) - -func HeadingOpenTagFromLevel(level int) string { - if level < 1 || level > 5 { - return " 5 { - return "" - } - return closeHTags[level-1] -} - -func (r *Renderer) OutHRTag(w io.Writer, attrs []string) { - hr := TagWithAttributes("") -} - -// Text writes ast.Text node -func (r *Renderer) Text(w io.Writer, text *ast.Text) { - if r.Opts.Flags&Smartypants != 0 { - var tmp bytes.Buffer - EscapeHTML(&tmp, text.Literal) - r.sr.Process(w, tmp.Bytes()) - } else { - _, parentIsLink := text.Parent.(*ast.Link) - if parentIsLink { - EscLink(w, text.Literal) - } else { - EscapeHTML(w, text.Literal) - } - } -} - -// HardBreak writes ast.Hardbreak node -func (r *Renderer) HardBreak(w io.Writer, node *ast.Hardbreak) { - r.OutOneOf(w, r.Opts.Flags&UseXHTML == 0, "
    ", "
    ") - r.CR(w) -} - -// NonBlockingSpace writes ast.NonBlockingSpace node -func (r *Renderer) NonBlockingSpace(w io.Writer, node *ast.NonBlockingSpace) { - r.Outs(w, " ") -} - -// OutOneOf writes first or second depending on outFirst -func (r *Renderer) OutOneOf(w io.Writer, outFirst bool, first string, second string) { - if outFirst { - r.Outs(w, first) - } else { - r.Outs(w, second) - } -} - -// OutOneOfCr writes CR + first or second + CR depending on outFirst -func (r *Renderer) OutOneOfCr(w io.Writer, outFirst bool, first string, second string) { - if outFirst { - r.CR(w) - r.Outs(w, first) - } else { - r.Outs(w, second) - r.CR(w) - } -} - -// HTMLSpan writes ast.HTMLSpan node -func (r *Renderer) HTMLSpan(w io.Writer, span *ast.HTMLSpan) { - if r.Opts.Flags&SkipHTML == 0 { - r.Out(w, span.Literal) - } -} - -func (r *Renderer) linkEnter(w io.Writer, link *ast.Link) { - attrs := link.AdditionalAttributes - dest := link.Destination - dest = AddAbsPrefix(dest, r.Opts.AbsolutePrefix) - var hrefBuf bytes.Buffer - hrefBuf.WriteString("href=\"") - EscLink(&hrefBuf, dest) - hrefBuf.WriteByte('"') - attrs = append(attrs, hrefBuf.String()) - if link.NoteID != 0 { - r.Outs(w, FootnoteRef(r.Opts.FootnoteAnchorPrefix, link)) - return - } - - attrs = appendLinkAttrs(attrs, r.Opts.Flags, dest) - if len(link.Title) > 0 { - var titleBuff bytes.Buffer - titleBuff.WriteString("title=\"") - EscapeHTML(&titleBuff, link.Title) - titleBuff.WriteByte('"') - attrs = append(attrs, titleBuff.String()) - } - r.OutTag(w, "") - } -} - -// Link writes ast.Link node -func (r *Renderer) Link(w io.Writer, link *ast.Link, entering bool) { - // mark it but don't link it if it is not a safe link: no smartypants - if needSkipLink(r, link.Destination) { - r.OutOneOf(w, entering, "", "") - return - } - - if entering { - r.linkEnter(w, link) - } else { - r.linkExit(w, link) - } -} - -func (r *Renderer) imageEnter(w io.Writer, image *ast.Image) { - r.DisableTags++ - if r.DisableTags > 1 { - return - } - src := image.Destination - src = AddAbsPrefix(src, r.Opts.AbsolutePrefix) - attrs := BlockAttrs(image) - if r.Opts.Flags&LazyLoadImages != 0 { - attrs = append(attrs, `loading="lazy"`) - } - - s := TagWithAttributes("" from end - r.Outs(w, s+` src="`) - EscLink(w, src) - r.Outs(w, `" alt="`) -} - -func (r *Renderer) imageExit(w io.Writer, image *ast.Image) { - r.DisableTags-- - if r.DisableTags > 0 { - return - } - if image.Title != nil { - r.Outs(w, `" title="`) - EscapeHTML(w, image.Title) - } - r.Outs(w, `" />`) -} - -// Image writes ast.Image node -func (r *Renderer) Image(w io.Writer, node *ast.Image, entering bool) { - if entering { - r.imageEnter(w, node) - } else { - r.imageExit(w, node) - } -} - -func (r *Renderer) paragraphEnter(w io.Writer, para *ast.Paragraph) { - // TODO: untangle this clusterfuck about when the newlines need - // to be added and when not. 
- prev := ast.GetPrevNode(para) - if prev != nil { - switch prev.(type) { - case *ast.HTMLBlock, *ast.List, *ast.Paragraph, *ast.Heading, *ast.CaptionFigure, *ast.CodeBlock, *ast.BlockQuote, *ast.Aside, *ast.HorizontalRule: - r.CR(w) - } - } - - if prev == nil { - _, isParentBlockQuote := para.Parent.(*ast.BlockQuote) - if isParentBlockQuote { - r.CR(w) - } - _, isParentAside := para.Parent.(*ast.Aside) - if isParentAside { - r.CR(w) - } - } - - ptag := "" - } - r.Outs(w, ptag) - if !(IsListItem(para.Parent) && ast.GetNextNode(para) == nil) { - r.CR(w) - } -} - -// Paragraph writes ast.Paragraph node -func (r *Renderer) Paragraph(w io.Writer, para *ast.Paragraph, entering bool) { - if SkipParagraphTags(para) { - return - } - if entering { - r.paragraphEnter(w, para) - } else { - r.paragraphExit(w, para) - } -} - -// Code writes ast.Code node -func (r *Renderer) Code(w io.Writer, node *ast.Code) { - r.Outs(w, "") - EscapeHTML(w, node.Literal) - r.Outs(w, "") -} - -// HTMLBlock write ast.HTMLBlock node -func (r *Renderer) HTMLBlock(w io.Writer, node *ast.HTMLBlock) { - if r.Opts.Flags&SkipHTML != 0 { - return - } - r.CR(w) - r.Out(w, node.Literal) - r.CR(w) -} - -func (r *Renderer) EnsureUniqueHeadingID(id string) string { - for count, found := r.headingIDs[id]; found; count, found = r.headingIDs[id] { - tmp := fmt.Sprintf("%s-%d", id, count+1) - - if _, tmpFound := r.headingIDs[tmp]; !tmpFound { - r.headingIDs[id] = count + 1 - id = tmp - } else { - id = id + "-1" - } - } - - if _, found := r.headingIDs[id]; !found { - r.headingIDs[id] = 0 - } - - return id -} - -func (r *Renderer) headingEnter(w io.Writer, nodeData *ast.Heading) { - var attrs []string - var class string - // TODO(miek): add helper functions for coalescing these classes. - if nodeData.IsTitleblock { - class = "title" - } - if nodeData.IsSpecial { - if class != "" { - class += " special" - } else { - class = "special" - } - } - if class != "" { - attrs = []string{`class="` + class + `"`} - } - - if nodeData.HeadingID != "" { - id := r.EnsureUniqueHeadingID(nodeData.HeadingID) - if r.Opts.HeadingIDPrefix != "" { - id = r.Opts.HeadingIDPrefix + id - } - if r.Opts.HeadingIDSuffix != "" { - id = id + r.Opts.HeadingIDSuffix - } - attrID := `id="` + id + `"` - attrs = append(attrs, attrID) - } - attrs = append(attrs, BlockAttrs(nodeData)...) - r.CR(w) - r.OutTag(w, HeadingOpenTagFromLevel(nodeData.Level), attrs) -} - -func (r *Renderer) headingExit(w io.Writer, heading *ast.Heading) { - r.Outs(w, HeadingCloseTagFromLevel(heading.Level)) - if !(IsListItem(heading.Parent) && ast.GetNextNode(heading) == nil) { - r.CR(w) - } -} - -// Heading writes ast.Heading node -func (r *Renderer) Heading(w io.Writer, node *ast.Heading, entering bool) { - if entering { - r.headingEnter(w, node) - } else { - r.headingExit(w, node) - } -} - -// HorizontalRule writes ast.HorizontalRule node -func (r *Renderer) HorizontalRule(w io.Writer, node *ast.HorizontalRule) { - r.CR(w) - r.OutHRTag(w, BlockAttrs(node)) - r.CR(w) -} - -func (r *Renderer) listEnter(w io.Writer, nodeData *ast.List) { - // TODO: attrs don't seem to be set - var attrs []string - - if nodeData.IsFootnotesList { - r.Outs(w, "\n
    \n\n") - if r.Opts.Flags&FootnoteNoHRTag == 0 { - r.OutHRTag(w, nil) - r.CR(w) - } - } - r.CR(w) - if IsListItem(nodeData.Parent) { - grand := nodeData.Parent.GetParent() - if IsListTight(grand) { - r.CR(w) - } - } - - openTag := " 0 { - attrs = append(attrs, fmt.Sprintf(`start="%d"`, nodeData.Start)) - } - openTag = "\n") - } -} - -// List writes ast.List node -func (r *Renderer) List(w io.Writer, list *ast.List, entering bool) { - if entering { - r.listEnter(w, list) - } else { - r.listExit(w, list) - } -} - -func (r *Renderer) listItemEnter(w io.Writer, listItem *ast.ListItem) { - if ListItemOpenCR(listItem) { - r.CR(w) - } - if listItem.RefLink != nil { - slug := Slugify(listItem.RefLink) - r.Outs(w, FootnoteItem(r.Opts.FootnoteAnchorPrefix, slug)) - return - } - - openTag := "
<li>" - if listItem.ListFlags&ast.ListTypeDefinition != 0 { - openTag = "<dd>
    " - } - if listItem.ListFlags&ast.ListTypeTerm != 0 { - openTag = "
    " - } - r.Outs(w, openTag) -} - -func (r *Renderer) listItemExit(w io.Writer, listItem *ast.ListItem) { - if listItem.RefLink != nil && r.Opts.Flags&FootnoteReturnLinks != 0 { - slug := Slugify(listItem.RefLink) - prefix := r.Opts.FootnoteAnchorPrefix - link := r.Opts.FootnoteReturnLinkContents - s := FootnoteReturnLink(prefix, link, slug) - r.Outs(w, s) - } - - closeTag := "
" - if listItem.ListFlags&ast.ListTypeDefinition != 0 { - closeTag = "</dd>" - } - if listItem.ListFlags&ast.ListTypeTerm != 0 { - closeTag = "</dt>" - } - r.Outs(w, closeTag) - r.CR(w) -} - -// ListItem writes ast.ListItem node -func (r *Renderer) ListItem(w io.Writer, listItem *ast.ListItem, entering bool) { - if entering { - r.listItemEnter(w, listItem) - } else { - r.listItemExit(w, listItem) - } -} - -// EscapeHTMLCallouts writes html-escaped d to w. It escapes &, <, > and " characters, *but* -// expands callouts <`i`> with the callout HTML, i.e. by calling r.callout() with a newly created -// ast.Callout node. -func (r *Renderer) EscapeHTMLCallouts(w io.Writer, d []byte) { - ld := len(d) -Parse: - for i := 0; i < ld; i++ { - for _, comment := range r.Opts.Comments { - if !bytes.HasPrefix(d[i:], comment) { - break - } - - lc := len(comment) - if i+lc < ld { - if id, consumed := parser.IsCallout(d[i+lc:]); consumed > 0 { - // We have seen a callout - callout := &ast.Callout{ID: id} - r.Callout(w, callout) - i += consumed + lc - 1 - continue Parse - } - } - } - - escSeq := Escaper[d[i]] - if escSeq != nil { - w.Write(escSeq) - } else { - w.Write([]byte{d[i]}) - } - } -} - -// CodeBlock writes ast.CodeBlock node -func (r *Renderer) CodeBlock(w io.Writer, codeBlock *ast.CodeBlock) { - var attrs []string - // TODO(miek): this can add multiple class= attribute, they should be coalesced into one. - // This is probably true for some other elements as well - attrs = appendLanguageAttr(attrs, codeBlock.Info) - attrs = append(attrs, BlockAttrs(codeBlock)...) - r.CR(w) - - r.Outs(w, "
    ")
-	code := TagWithAttributes("<code", attrs)
-	r.Outs(w, code)
-	if r.Opts.Comments != nil {
-		r.EscapeHTMLCallouts(w, codeBlock.Literal)
-	} else {
-		EscapeHTML(w, codeBlock.Literal)
-	}
-	r.Outs(w, "</code>")
-	r.Outs(w, "</pre>
    ") - if !IsListItem(codeBlock.Parent) { - r.CR(w) - } -} - -// Caption writes ast.Caption node -func (r *Renderer) Caption(w io.Writer, caption *ast.Caption, entering bool) { - if entering { - r.Outs(w, "
    ") - return - } - r.Outs(w, "
    ") -} - -// CaptionFigure writes ast.CaptionFigure node -func (r *Renderer) CaptionFigure(w io.Writer, figure *ast.CaptionFigure, entering bool) { - // TODO(miek): copy more generic ways of mmark over to here. - fig := "` - } else { - fig += ">" - } - r.OutOneOf(w, entering, fig, "\n\n") -} - -// TableCell writes ast.TableCell node -func (r *Renderer) TableCell(w io.Writer, tableCell *ast.TableCell, entering bool) { - if !entering { - r.OutOneOf(w, tableCell.IsHeader, "", "") - r.CR(w) - return - } - - // entering - var attrs []string - openTag := " 0 { - attrs = append(attrs, fmt.Sprintf(`colspan="%d"`, colspan)) - } - if ast.GetPrevNode(tableCell) == nil { - r.CR(w) - } - r.OutTag(w, openTag, attrs) -} - -// TableBody writes ast.TableBody node -func (r *Renderer) TableBody(w io.Writer, node *ast.TableBody, entering bool) { - if entering { - r.CR(w) - r.Outs(w, "") - // XXX: this is to adhere to a rather silly test. Should fix test. - if ast.GetFirstChild(node) == nil { - r.CR(w) - } - } else { - r.Outs(w, "") - r.CR(w) - } -} - -// DocumentMatter writes ast.DocumentMatter -func (r *Renderer) DocumentMatter(w io.Writer, node *ast.DocumentMatter, entering bool) { - if !entering { - return - } - if r.documentMatter != ast.DocumentMatterNone { - r.Outs(w, "\n") - } - switch node.Matter { - case ast.DocumentMatterFront: - r.Outs(w, `
<section data-matter="front">`) - case ast.DocumentMatterMain: - r.Outs(w, `
<section data-matter="main">`) - case ast.DocumentMatterBack: - r.Outs(w, `<section data-matter="back">
    `) - } - r.documentMatter = node.Matter -} - -// Citation writes ast.Citation node -func (r *Renderer) Citation(w io.Writer, node *ast.Citation) { - for i, c := range node.Destination { - attr := []string{`class="none"`} - switch node.Type[i] { - case ast.CitationTypeNormative: - attr[0] = `class="normative"` - case ast.CitationTypeInformative: - attr[0] = `class="informative"` - case ast.CitationTypeSuppressed: - attr[0] = `class="suppressed"` - } - r.OutTag(w, "`+r.Opts.CitationFormatString+``, c, c)) - r.Outs(w, "") - } -} - -// Callout writes ast.Callout node -func (r *Renderer) Callout(w io.Writer, node *ast.Callout) { - attr := []string{`class="callout"`} - r.OutTag(w, "") -} - -// Index writes ast.Index node -func (r *Renderer) Index(w io.Writer, node *ast.Index) { - // there is no in-text representation. - attr := []string{`class="index"`, fmt.Sprintf(`id="%s"`, node.ID)} - r.OutTag(w, "") -} - -// RenderNode renders a markdown node to HTML -func (r *Renderer) RenderNode(w io.Writer, node ast.Node, entering bool) ast.WalkStatus { - if r.Opts.RenderNodeHook != nil { - status, didHandle := r.Opts.RenderNodeHook(w, node, entering) - if didHandle { - return status - } - } - switch node := node.(type) { - case *ast.Text: - r.Text(w, node) - case *ast.Softbreak: - r.CR(w) - // TODO: make it configurable via out(renderer.softbreak) - case *ast.Hardbreak: - r.HardBreak(w, node) - case *ast.NonBlockingSpace: - r.NonBlockingSpace(w, node) - case *ast.Emph: - r.OutOneOf(w, entering, "", "") - case *ast.Strong: - r.OutOneOf(w, entering, "", "") - case *ast.Del: - r.OutOneOf(w, entering, "", "") - case *ast.BlockQuote: - tag := TagWithAttributes("") - case *ast.Aside: - tag := TagWithAttributes("") - case *ast.Link: - r.Link(w, node, entering) - case *ast.CrossReference: - link := &ast.Link{Destination: append([]byte("#"), node.Destination...)} - r.Link(w, link, entering) - case *ast.Citation: - r.Citation(w, node) - case *ast.Image: - if r.Opts.Flags&SkipImages != 0 { - return ast.SkipChildren - } - r.Image(w, node, entering) - case *ast.Code: - r.Code(w, node) - case *ast.CodeBlock: - r.CodeBlock(w, node) - case *ast.Caption: - r.Caption(w, node, entering) - case *ast.CaptionFigure: - r.CaptionFigure(w, node, entering) - case *ast.Document: - // do nothing - case *ast.Paragraph: - r.Paragraph(w, node, entering) - case *ast.HTMLSpan: - r.HTMLSpan(w, node) - case *ast.HTMLBlock: - r.HTMLBlock(w, node) - case *ast.Heading: - r.Heading(w, node, entering) - case *ast.HorizontalRule: - r.HorizontalRule(w, node) - case *ast.List: - r.List(w, node, entering) - case *ast.ListItem: - r.ListItem(w, node, entering) - case *ast.Table: - tag := TagWithAttributes("") - case *ast.TableCell: - r.TableCell(w, node, entering) - case *ast.TableHeader: - r.OutOneOfCr(w, entering, "", "") - case *ast.TableBody: - r.TableBody(w, node, entering) - case *ast.TableRow: - r.OutOneOfCr(w, entering, "", "") - case *ast.TableFooter: - r.OutOneOfCr(w, entering, "", "") - case *ast.Math: - r.OutOneOf(w, true, `\(`, `\)`) - EscapeHTML(w, node.Literal) - r.OutOneOf(w, false, `\(`, `\)`) - case *ast.MathBlock: - r.OutOneOf(w, entering, `

    \[`, `\]

    `) - if entering { - EscapeHTML(w, node.Literal) - } - case *ast.DocumentMatter: - r.DocumentMatter(w, node, entering) - case *ast.Callout: - r.Callout(w, node) - case *ast.Index: - r.Index(w, node) - case *ast.Subscript: - r.OutOneOf(w, true, "", "") - if entering { - Escape(w, node.Literal) - } - r.OutOneOf(w, false, "", "") - case *ast.Superscript: - r.OutOneOf(w, true, "", "") - if entering { - Escape(w, node.Literal) - } - r.OutOneOf(w, false, "", "") - case *ast.Footnotes: - // nothing by default; just output the list. - default: - panic(fmt.Sprintf("Unknown node %T", node)) - } - return ast.GoToNext -} - -// RenderHeader writes HTML document preamble and TOC if requested. -func (r *Renderer) RenderHeader(w io.Writer, ast ast.Node) { - r.writeDocumentHeader(w) - if r.Opts.Flags&TOC != 0 { - r.writeTOC(w, ast) - } -} - -// RenderFooter writes HTML document footer. -func (r *Renderer) RenderFooter(w io.Writer, _ ast.Node) { - if r.documentMatter != ast.DocumentMatterNone { - r.Outs(w, "
    \n") - } - - if r.Opts.Flags&CompletePage == 0 { - return - } - io.WriteString(w, "\n\n\n") -} - -func (r *Renderer) writeDocumentHeader(w io.Writer) { - if r.Opts.Flags&CompletePage == 0 { - return - } - ending := "" - if r.Opts.Flags&UseXHTML != 0 { - io.WriteString(w, "\n") - io.WriteString(w, "\n") - ending = " /" - } else { - io.WriteString(w, "\n") - io.WriteString(w, "\n") - } - io.WriteString(w, "\n") - io.WriteString(w, " ") - if r.Opts.Flags&Smartypants != 0 { - r.sr.Process(w, []byte(r.Opts.Title)) - } else { - EscapeHTML(w, []byte(r.Opts.Title)) - } - io.WriteString(w, "\n") - io.WriteString(w, r.Opts.Generator) - io.WriteString(w, "\"") - io.WriteString(w, ending) - io.WriteString(w, ">\n") - io.WriteString(w, " \n") - if r.Opts.CSS != "" { - io.WriteString(w, " \n") - } - if r.Opts.Icon != "" { - io.WriteString(w, " \n") - } - if r.Opts.Head != nil { - w.Write(r.Opts.Head) - } - io.WriteString(w, "\n") - io.WriteString(w, "\n\n") -} - -func (r *Renderer) writeTOC(w io.Writer, doc ast.Node) { - buf := bytes.Buffer{} - - inHeading := false - tocLevel := 0 - headingCount := 0 - - ast.WalkFunc(doc, func(node ast.Node, entering bool) ast.WalkStatus { - if nodeData, ok := node.(*ast.Heading); ok && !nodeData.IsTitleblock { - inHeading = entering - if !entering { - buf.WriteString("") - return ast.GoToNext - } - if nodeData.HeadingID == "" { - nodeData.HeadingID = fmt.Sprintf("toc_%d", headingCount) - } - if nodeData.Level == tocLevel { - buf.WriteString("\n\n
  • ") - } else if nodeData.Level < tocLevel { - for nodeData.Level < tocLevel { - tocLevel-- - buf.WriteString("
  • \n") - } - buf.WriteString("\n\n
  • ") - } else { - for nodeData.Level > tocLevel { - tocLevel++ - buf.WriteString("\n") - } - - if buf.Len() > 0 { - io.WriteString(w, "\n") - } - r.lastOutputLen = buf.Len() -} - -func IsList(node ast.Node) bool { - _, ok := node.(*ast.List) - return ok -} - -func IsListTight(node ast.Node) bool { - if list, ok := node.(*ast.List); ok { - return list.Tight - } - return false -} - -func IsListItem(node ast.Node) bool { - _, ok := node.(*ast.ListItem) - return ok -} - -func IsListItemTerm(node ast.Node) bool { - data, ok := node.(*ast.ListItem) - return ok && data.ListFlags&ast.ListTypeTerm != 0 -} - -// TODO: move to internal package -// Create a url-safe slug for fragments -func Slugify(in []byte) []byte { - if len(in) == 0 { - return in - } - out := make([]byte, 0, len(in)) - sym := false - - for _, ch := range in { - if isAlnum(ch) { - sym = false - out = append(out, ch) - } else if sym { - continue - } else { - out = append(out, '-') - sym = true - } - } - var a, b int - var ch byte - for a, ch = range out { - if ch != '-' { - break - } - } - for b = len(out) - 1; b > 0; b-- { - if out[b] != '-' { - break - } - } - return out[a : b+1] -} - -// BlockAttrs takes a node and checks if it has block level attributes set. If so it -// will return a slice each containing a "key=value(s)" string. -func BlockAttrs(node ast.Node) []string { - var attr *ast.Attribute - if c := node.AsContainer(); c != nil && c.Attribute != nil { - attr = c.Attribute - } - if l := node.AsLeaf(); l != nil && l.Attribute != nil { - attr = l.Attribute - } - if attr == nil { - return nil - } - - var s []string - if attr.ID != nil { - s = append(s, fmt.Sprintf(`%s="%s"`, IDTag, attr.ID)) - } - - classes := "" - for _, c := range attr.Classes { - classes += " " + string(c) - } - if classes != "" { - s = append(s, fmt.Sprintf(`class="%s"`, classes[1:])) // skip space we added. - } - - // sort the attributes so it remain stable between runs - var keys = []string{} - for k := range attr.Attrs { - keys = append(keys, k) - } - sort.Strings(keys) - for _, k := range keys { - s = append(s, fmt.Sprintf(`%s="%s"`, k, attr.Attrs[k])) - } - - return s -} - -// TagWithAttributes creates a HTML tag with a given name and attributes -func TagWithAttributes(name string, attrs []string) string { - s := name - if len(attrs) > 0 { - s += " " + strings.Join(attrs, " ") - } - return s + ">" -} diff --git a/vendor/github.com/gomarkdown/markdown/html/smartypants.go b/vendor/github.com/gomarkdown/markdown/html/smartypants.go deleted file mode 100644 index 706e4ff1a8..0000000000 --- a/vendor/github.com/gomarkdown/markdown/html/smartypants.go +++ /dev/null @@ -1,452 +0,0 @@ -package html - -import ( - "bytes" - "io" - - "github.com/gomarkdown/markdown/parser" -) - -// SmartyPants rendering - -var ( - isSpace = parser.IsSpace - isAlnum = parser.IsAlnum - isPunctuation = parser.IsPunctuation -) - -// SPRenderer is a struct containing state of a Smartypants renderer. 
-type SPRenderer struct { - inSingleQuote bool - inDoubleQuote bool - callbacks [256]smartCallback -} - -func wordBoundary(c byte) bool { - return c == 0 || isSpace(c) || isPunctuation(c) -} - -func tolower(c byte) byte { - if c >= 'A' && c <= 'Z' { - return c - 'A' + 'a' - } - return c -} - -func isdigit(c byte) bool { - return c >= '0' && c <= '9' -} - -func smartQuoteHelper(out *bytes.Buffer, previousChar byte, nextChar byte, quote byte, isOpen *bool, addNBSP bool) bool { - // edge of the buffer is likely to be a tag that we don't get to see, - // so we treat it like text sometimes - - // enumerate all sixteen possibilities for (previousChar, nextChar) - // each can be one of {0, space, punct, other} - switch { - case previousChar == 0 && nextChar == 0: - // context is not any help here, so toggle - *isOpen = !*isOpen - case isSpace(previousChar) && nextChar == 0: - // [ "] might be [ "foo...] - *isOpen = true - case isPunctuation(previousChar) && nextChar == 0: - // [!"] hmm... could be [Run!"] or [("...] - *isOpen = false - case /* isnormal(previousChar) && */ nextChar == 0: - // [a"] is probably a close - *isOpen = false - case previousChar == 0 && isSpace(nextChar): - // [" ] might be [...foo" ] - *isOpen = false - case isSpace(previousChar) && isSpace(nextChar): - // [ " ] context is not any help here, so toggle - *isOpen = !*isOpen - case isPunctuation(previousChar) && isSpace(nextChar): - // [!" ] is probably a close - *isOpen = false - case /* isnormal(previousChar) && */ isSpace(nextChar): - // [a" ] this is one of the easy cases - *isOpen = false - case previousChar == 0 && isPunctuation(nextChar): - // ["!] hmm... could be ["$1.95] or ["!...] - *isOpen = false - case isSpace(previousChar) && isPunctuation(nextChar): - // [ "!] looks more like [ "$1.95] - *isOpen = true - case isPunctuation(previousChar) && isPunctuation(nextChar): - // [!"!] context is not any help here, so toggle - *isOpen = !*isOpen - case /* isnormal(previousChar) && */ isPunctuation(nextChar): - // [a"!] is probably a close - *isOpen = false - case previousChar == 0 /* && isnormal(nextChar) */ : - // ["a] is probably an open - *isOpen = true - case isSpace(previousChar) /* && isnormal(nextChar) */ : - // [ "a] this is one of the easy cases - *isOpen = true - case isPunctuation(previousChar) /* && isnormal(nextChar) */ : - // [!"a] is probably an open - *isOpen = true - default: - // [a'b] maybe a contraction? - *isOpen = false - } - - // Note that with the limited lookahead, this non-breaking - // space will also be appended to single double quotes. 
- if addNBSP && !*isOpen { - out.WriteString(" ") - } - - out.WriteByte('&') - if *isOpen { - out.WriteByte('l') - } else { - out.WriteByte('r') - } - out.WriteByte(quote) - out.WriteString("quo;") - - if addNBSP && *isOpen { - out.WriteString(" ") - } - - return true -} - -func (r *SPRenderer) smartSingleQuote(out *bytes.Buffer, previousChar byte, text []byte) int { - if len(text) >= 2 { - t1 := tolower(text[1]) - - if t1 == '\'' { - nextChar := byte(0) - if len(text) >= 3 { - nextChar = text[2] - } - if smartQuoteHelper(out, previousChar, nextChar, 'd', &r.inDoubleQuote, false) { - return 1 - } - } - - if (t1 == 's' || t1 == 't' || t1 == 'm' || t1 == 'd') && (len(text) < 3 || wordBoundary(text[2])) { - out.WriteString("’") - return 0 - } - - if len(text) >= 3 { - t2 := tolower(text[2]) - - if ((t1 == 'r' && t2 == 'e') || (t1 == 'l' && t2 == 'l') || (t1 == 'v' && t2 == 'e')) && - (len(text) < 4 || wordBoundary(text[3])) { - out.WriteString("’") - return 0 - } - } - } - - nextChar := byte(0) - if len(text) > 1 { - nextChar = text[1] - } - if smartQuoteHelper(out, previousChar, nextChar, 's', &r.inSingleQuote, false) { - return 0 - } - - out.WriteByte(text[0]) - return 0 -} - -func (r *SPRenderer) smartParens(out *bytes.Buffer, previousChar byte, text []byte) int { - if len(text) >= 3 { - t1 := tolower(text[1]) - t2 := tolower(text[2]) - - if t1 == 'c' && t2 == ')' { - out.WriteString("©") - return 2 - } - - if t1 == 'r' && t2 == ')' { - out.WriteString("®") - return 2 - } - - if len(text) >= 4 && t1 == 't' && t2 == 'm' && text[3] == ')' { - out.WriteString("™") - return 3 - } - } - - out.WriteByte(text[0]) - return 0 -} - -func (r *SPRenderer) smartDash(out *bytes.Buffer, previousChar byte, text []byte) int { - if len(text) >= 2 { - if text[1] == '-' { - out.WriteString("—") - return 1 - } - - if wordBoundary(previousChar) && wordBoundary(text[1]) { - out.WriteString("–") - return 0 - } - } - - out.WriteByte(text[0]) - return 0 -} - -func (r *SPRenderer) smartDashLatex(out *bytes.Buffer, previousChar byte, text []byte) int { - if len(text) >= 3 && text[1] == '-' && text[2] == '-' { - out.WriteString("—") - return 2 - } - if len(text) >= 2 && text[1] == '-' { - out.WriteString("–") - return 1 - } - - out.WriteByte(text[0]) - return 0 -} - -func (r *SPRenderer) smartAmpVariant(out *bytes.Buffer, previousChar byte, text []byte, quote byte, addNBSP bool) int { - if bytes.HasPrefix(text, []byte(""")) { - nextChar := byte(0) - if len(text) >= 7 { - nextChar = text[6] - } - if smartQuoteHelper(out, previousChar, nextChar, quote, &r.inDoubleQuote, addNBSP) { - return 5 - } - } - - if bytes.HasPrefix(text, []byte("�")) { - return 3 - } - - out.WriteByte('&') - return 0 -} - -func (r *SPRenderer) smartAmp(angledQuotes, addNBSP bool) func(*bytes.Buffer, byte, []byte) int { - var quote byte = 'd' - if angledQuotes { - quote = 'a' - } - - return func(out *bytes.Buffer, previousChar byte, text []byte) int { - return r.smartAmpVariant(out, previousChar, text, quote, addNBSP) - } -} - -func (r *SPRenderer) smartPeriod(out *bytes.Buffer, previousChar byte, text []byte) int { - if len(text) >= 3 && text[1] == '.' && text[2] == '.' { - out.WriteString("…") - return 2 - } - - if len(text) >= 5 && text[1] == ' ' && text[2] == '.' && text[3] == ' ' && text[4] == '.' 
{ - out.WriteString("…") - return 4 - } - - out.WriteByte(text[0]) - return 0 -} - -func (r *SPRenderer) smartBacktick(out *bytes.Buffer, previousChar byte, text []byte) int { - if len(text) >= 2 && text[1] == '`' { - nextChar := byte(0) - if len(text) >= 3 { - nextChar = text[2] - } - if smartQuoteHelper(out, previousChar, nextChar, 'd', &r.inDoubleQuote, false) { - return 1 - } - } - - out.WriteByte(text[0]) - return 0 -} - -func (r *SPRenderer) smartNumberGeneric(out *bytes.Buffer, previousChar byte, text []byte) int { - if wordBoundary(previousChar) && previousChar != '/' && len(text) >= 3 { - // is it of the form digits/digits(word boundary)?, i.e., \d+/\d+\b - // note: check for regular slash (/) or fraction slash (⁄, 0x2044, or 0xe2 81 84 in utf-8) - // and avoid changing dates like 1/23/2005 into fractions. - numEnd := 0 - for len(text) > numEnd && isdigit(text[numEnd]) { - numEnd++ - } - if numEnd == 0 { - out.WriteByte(text[0]) - return 0 - } - denStart := numEnd + 1 - if len(text) > numEnd+3 && text[numEnd] == 0xe2 && text[numEnd+1] == 0x81 && text[numEnd+2] == 0x84 { - denStart = numEnd + 3 - } else if len(text) < numEnd+2 || text[numEnd] != '/' { - out.WriteByte(text[0]) - return 0 - } - denEnd := denStart - for len(text) > denEnd && isdigit(text[denEnd]) { - denEnd++ - } - if denEnd == denStart { - out.WriteByte(text[0]) - return 0 - } - if len(text) == denEnd || wordBoundary(text[denEnd]) && text[denEnd] != '/' { - out.WriteString("") - out.Write(text[:numEnd]) - out.WriteString("") - out.Write(text[denStart:denEnd]) - out.WriteString("") - return denEnd - 1 - } - } - - out.WriteByte(text[0]) - return 0 -} - -func (r *SPRenderer) smartNumber(out *bytes.Buffer, previousChar byte, text []byte) int { - if wordBoundary(previousChar) && previousChar != '/' && len(text) >= 3 { - if text[0] == '1' && text[1] == '/' && text[2] == '2' { - if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' { - out.WriteString("½") - return 2 - } - } - - if text[0] == '1' && text[1] == '/' && text[2] == '4' { - if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' || (len(text) >= 5 && tolower(text[3]) == 't' && tolower(text[4]) == 'h') { - out.WriteString("¼") - return 2 - } - } - - if text[0] == '3' && text[1] == '/' && text[2] == '4' { - if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' || (len(text) >= 6 && tolower(text[3]) == 't' && tolower(text[4]) == 'h' && tolower(text[5]) == 's') { - out.WriteString("¾") - return 2 - } - } - } - - out.WriteByte(text[0]) - return 0 -} - -func (r *SPRenderer) smartDoubleQuoteVariant(out *bytes.Buffer, previousChar byte, text []byte, quote byte) int { - nextChar := byte(0) - if len(text) > 1 { - nextChar = text[1] - } - if !smartQuoteHelper(out, previousChar, nextChar, quote, &r.inDoubleQuote, false) { - out.WriteString(""") - } - - return 0 -} - -func (r *SPRenderer) smartDoubleQuote(out *bytes.Buffer, previousChar byte, text []byte) int { - return r.smartDoubleQuoteVariant(out, previousChar, text, 'd') -} - -func (r *SPRenderer) smartAngledDoubleQuote(out *bytes.Buffer, previousChar byte, text []byte) int { - return r.smartDoubleQuoteVariant(out, previousChar, text, 'a') -} - -func (r *SPRenderer) smartLeftAngle(out *bytes.Buffer, previousChar byte, text []byte) int { - i := 0 - - for i < len(text) && text[i] != '>' { - i++ - } - - out.Write(text[:i+1]) - return i -} - -type smartCallback func(out *bytes.Buffer, previousChar byte, text []byte) int - -// NewSmartypantsRenderer constructs a Smartypants renderer object. 
-func NewSmartypantsRenderer(flags Flags) *SPRenderer { - var ( - r SPRenderer - - smartAmpAngled = r.smartAmp(true, false) - smartAmpAngledNBSP = r.smartAmp(true, true) - smartAmpRegular = r.smartAmp(false, false) - smartAmpRegularNBSP = r.smartAmp(false, true) - - addNBSP = flags&SmartypantsQuotesNBSP != 0 - ) - - if flags&SmartypantsAngledQuotes == 0 { - r.callbacks['"'] = r.smartDoubleQuote - if !addNBSP { - r.callbacks['&'] = smartAmpRegular - } else { - r.callbacks['&'] = smartAmpRegularNBSP - } - } else { - r.callbacks['"'] = r.smartAngledDoubleQuote - if !addNBSP { - r.callbacks['&'] = smartAmpAngled - } else { - r.callbacks['&'] = smartAmpAngledNBSP - } - } - r.callbacks['\''] = r.smartSingleQuote - r.callbacks['('] = r.smartParens - if flags&SmartypantsDashes != 0 { - if flags&SmartypantsLatexDashes == 0 { - r.callbacks['-'] = r.smartDash - } else { - r.callbacks['-'] = r.smartDashLatex - } - } - r.callbacks['.'] = r.smartPeriod - if flags&SmartypantsFractions == 0 { - r.callbacks['1'] = r.smartNumber - r.callbacks['3'] = r.smartNumber - } else { - for ch := '1'; ch <= '9'; ch++ { - r.callbacks[ch] = r.smartNumberGeneric - } - } - r.callbacks['<'] = r.smartLeftAngle - r.callbacks['`'] = r.smartBacktick - return &r -} - -// Process is the entry point of the Smartypants renderer. -func (r *SPRenderer) Process(w io.Writer, text []byte) { - mark := 0 - for i := 0; i < len(text); i++ { - if action := r.callbacks[text[i]]; action != nil { - if i > mark { - w.Write(text[mark:i]) - } - previousChar := byte(0) - if i > 0 { - previousChar = text[i-1] - } - var tmp bytes.Buffer - i += action(&tmp, previousChar, text[i:]) - w.Write(tmp.Bytes()) - mark = i + 1 - } - } - if mark < len(text) { - w.Write(text[mark:]) - } -} diff --git a/vendor/github.com/gomarkdown/markdown/parser/aside.go b/vendor/github.com/gomarkdown/markdown/parser/aside.go deleted file mode 100644 index 9d02ed0490..0000000000 --- a/vendor/github.com/gomarkdown/markdown/parser/aside.go +++ /dev/null @@ -1,73 +0,0 @@ -package parser - -import ( - "bytes" - - "github.com/gomarkdown/markdown/ast" -) - -// returns aisde prefix length -func (p *Parser) asidePrefix(data []byte) int { - i := 0 - n := len(data) - for i < 3 && i < n && data[i] == ' ' { - i++ - } - if i+1 < n && data[i] == 'A' && data[i+1] == '>' { - if i+2 < n && data[i+2] == ' ' { - return i + 3 - } - return i + 2 - } - return 0 -} - -// aside ends with at least one blank line -// followed by something without a aside prefix -func (p *Parser) terminateAside(data []byte, beg, end int) bool { - if IsEmpty(data[beg:]) <= 0 { - return false - } - if end >= len(data) { - return true - } - return p.asidePrefix(data[end:]) == 0 && IsEmpty(data[end:]) == 0 -} - -// parse a aside fragment -func (p *Parser) aside(data []byte) int { - var raw bytes.Buffer - beg, end := 0, 0 - // identical to quote - for beg < len(data) { - end = beg - // Step over whole lines, collecting them. 
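The interesting piece of the deleted smartypants.go is the quote-direction analysis: smartQuoteHelper classifies the characters on either side of a quote and decides between the left and right entity, toggling state on ambiguous input. A much-reduced sketch of that idea, with my own names and a coarser classification than the original's sixteen-case switch:

```go
package main

import (
	"bytes"
	"fmt"
	"unicode"
)

// quoteEntity decides whether the '"' at position i opens or closes a
// quote, using only the neighbouring characters; ambiguous contexts
// toggle the open state, as the deleted helper did.
func quoteEntity(text []byte, i int, open *bool) string {
	prev, next := byte(0), byte(0)
	if i > 0 {
		prev = text[i-1]
	}
	if i+1 < len(text) {
		next = text[i+1]
	}
	isWordy := func(c byte) bool {
		return c != 0 && !unicode.IsSpace(rune(c)) && !unicode.IsPunct(rune(c))
	}
	switch {
	case isWordy(prev) && !isWordy(next):
		*open = false // [a" ] closes
	case !isWordy(prev) && isWordy(next):
		*open = true // [ "a] opens
	default:
		*open = !*open // no help from context: toggle
	}
	if *open {
		return "&ldquo;"
	}
	return "&rdquo;"
}

func main() {
	in := []byte(`say "hello" twice`)
	var out bytes.Buffer
	open := false
	for i, c := range in {
		if c == '"' {
			out.WriteString(quoteEntity(in, i, &open))
			continue
		}
		out.WriteByte(c)
	}
	fmt.Println(out.String()) // say &ldquo;hello&rdquo; twice
}
```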
diff --git a/vendor/github.com/gomarkdown/markdown/parser/aside.go b/vendor/github.com/gomarkdown/markdown/parser/aside.go
deleted file mode 100644
index 9d02ed0490..0000000000
--- a/vendor/github.com/gomarkdown/markdown/parser/aside.go
+++ /dev/null
@@ -1,73 +0,0 @@
-package parser
-
-import (
-	"bytes"
-
-	"github.com/gomarkdown/markdown/ast"
-)
-
-// asidePrefix returns the aside prefix length: up to three leading
-// spaces, then "A>", then an optional space.
-func (p *Parser) asidePrefix(data []byte) int {
-	i := 0
-	n := len(data)
-	for i < 3 && i < n && data[i] == ' ' {
-		i++
-	}
-	if i+1 < n && data[i] == 'A' && data[i+1] == '>' {
-		if i+2 < n && data[i+2] == ' ' {
-			return i + 3
-		}
-		return i + 2
-	}
-	return 0
-}
-
-// an aside ends with at least one blank line
-// followed by something without an aside prefix
-func (p *Parser) terminateAside(data []byte, beg, end int) bool {
-	if IsEmpty(data[beg:]) <= 0 {
-		return false
-	}
-	if end >= len(data) {
-		return true
-	}
-	return p.asidePrefix(data[end:]) == 0 && IsEmpty(data[end:]) == 0
-}
-
-// parse an aside fragment
-func (p *Parser) aside(data []byte) int {
-	var raw bytes.Buffer
-	beg, end := 0, 0
-	// identical to quote
-	for beg < len(data) {
-		end = beg
-		// Step over whole lines, collecting them. While doing that, check
-		// for fenced code: if one is found, incorporate it altogether,
-		// regardless of any contents inside it.
-		for end < len(data) && data[end] != '\n' {
-			if p.extensions&FencedCode != 0 {
-				if i := p.fencedCodeBlock(data[end:], false); i > 0 {
-					// -1 to compensate for the extra end++ after the loop:
-					end += i - 1
-					break
-				}
-			}
-			end++
-		}
-		end = skipCharN(data, end, '\n', 1)
-		if pre := p.asidePrefix(data[beg:]); pre > 0 {
-			// skip the prefix
-			beg += pre
-		} else if p.terminateAside(data, beg, end) {
-			break
-		}
-		// this line is part of the aside
-		raw.Write(data[beg:end])
-		beg = end
-	}
-
-	block := p.AddBlock(&ast.Aside{})
-	p.Block(raw.Bytes())
-	p.Finalize(block)
-	return end
-}
diff --git a/vendor/github.com/gomarkdown/markdown/parser/attribute.go b/vendor/github.com/gomarkdown/markdown/parser/attribute.go
deleted file mode 100644
index 5fdb07095a..0000000000
--- a/vendor/github.com/gomarkdown/markdown/parser/attribute.go
+++ /dev/null
@@ -1,116 +0,0 @@
[… attribute.go: attribute(), which scans a {#id .class key="value"} block-attribute line (honoring quoting and backslash escapes) into an ast.Attribute that AddBlock later attaches to the next block, and keyValue(), which splits a key="value" chunk and treats the quotes as mandatory.]
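attribute.go implements the {#id .class key="value"} syntax summarized above. A rough standalone sketch of the same grammar (a hypothetical helper, without the original's escape and quote-state tracking):

```go
package main

import (
	"fmt"
	"strings"
)

// parseBlockAttr splits a {#id .class key="value"} line into its parts.
// It is deliberately simpler than the deleted attribute(): no backslash
// escapes and no spaces inside quoted values.
func parseBlockAttr(line string) (id string, classes []string, attrs map[string]string, ok bool) {
	line = strings.TrimSpace(line)
	if len(line) < 3 || line[0] != '{' || line[len(line)-1] != '}' {
		return "", nil, nil, false
	}
	attrs = map[string]string{}
	for _, chunk := range strings.Fields(line[1 : len(line)-1]) {
		switch {
		case strings.HasPrefix(chunk, "#"):
			id = chunk[1:]
		case strings.HasPrefix(chunk, "."):
			classes = append(classes, chunk[1:])
		default:
			k, v, found := strings.Cut(chunk, "=")
			// as in the original keyValue(), quotes around the value are mandatory
			if !found || len(v) < 2 || v[0] != '"' || v[len(v)-1] != '"' {
				return "", nil, nil, false
			}
			attrs[k] = v[1 : len(v)-1]
		}
	}
	return id, classes, attrs, true
}

func main() {
	id, classes, attrs, _ := parseBlockAttr(`{#intro .note lang="en"}`)
	fmt.Println(id, classes, attrs) // intro [note] map[lang:en]
}
```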
diff --git a/vendor/github.com/gomarkdown/markdown/parser/block.go b/vendor/github.com/gomarkdown/markdown/parser/block.go
deleted file mode 100644
index 4b7326e7d7..0000000000
--- a/vendor/github.com/gomarkdown/markdown/parser/block.go
+++ /dev/null
@@ -1,1827 +0,0 @@
-package parser
-
-import (
-	"bytes"
-	"html"
-	"regexp"
-	"strconv"
-	"unicode"
-
-	"github.com/gomarkdown/markdown/ast"
-)
-
-// Parsing block-level elements.
[… block.go, first part: the charEntity/escapable patterns and the "Table: ", "Figure: " and "Quote: " caption markers; blockTags, the set of HTML tags recognized as block-level; sanitizeHeadingID, which lower-cases letters and digits and collapses everything else into single dashes, falling back to "empty"; Block(), the recursive top-level loop that tries each construct in turn (pending block attributes, includes, the user ParserHook, "#"-prefixed and ".#" special headings, preformatted HTML, "%" title blocks, blank lines, indented and fenced code, horizontal rules, block quotes, "A>" asides, figure blocks, tables, unordered/ordered/definition lists, $$ math blocks and {frontmatter}/{mainmatter}/{backmatter} document matter) before falling through to paragraph(); AddBlock, which attaches a pending attribute to the new node; and the prefix-heading recognizers, which honor optional {#id} heading IDs and, under AutoHeadingIDs, generate one via sanitizeHeadingID.]
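sanitizeHeadingID is small enough to show in full; this standalone version follows the deleted function line for line (only the name and the demo harness are mine):

```go
package main

import (
	"fmt"
	"unicode"
)

// sanitizeID builds an anchor from heading text: letters and digits are
// lower-cased and kept, every other run of characters becomes a single
// dash, and an all-symbol heading falls back to "empty".
func sanitizeID(text string) string {
	var anchor []rune
	pendingDash := false
	for _, r := range text {
		if unicode.IsLetter(r) || unicode.IsNumber(r) {
			if pendingDash && len(anchor) > 0 {
				anchor = append(anchor, '-')
			}
			pendingDash = false
			anchor = append(anchor, unicode.ToLower(r))
			continue
		}
		pendingDash = true
	}
	if len(anchor) == 0 {
		return "empty"
	}
	return string(anchor)
}

func main() {
	fmt.Println(sanitizeID("Community, discussion, contribution, and support"))
	// community-discussion-contribution-and-support
}
```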
[… block.go, second part: prefixSpecialHeading (always level 1); isUnderlinedHeading for setext-style "=" and "-" underlines; titleBlock, which joins consecutive "%"-prefixed lines into a title-block heading; the block-HTML recognizers html(), htmlComment, htmlHr (the only self-closing block tag handled), htmlFindTag and htmlFindEnd; IsEmpty and isHRule; and isFenceLine/syntaxRange, which require at least three backtick or tilde marker characters after up to three spaces, match a closing fence against the opening marker and extract the optional syntax string, bare or in braces.]
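The fence recognition summarized above is easy to get subtly wrong: at least three marker characters, up to three leading spaces, an optional info string. A compact sketch of the same checks, under my own names:

```go
package main

import "fmt"

// parseFence reports whether a line opens a fenced code block and, if so,
// returns the marker (e.g. "```") and the trimmed info string.
func parseFence(line string) (marker, info string, ok bool) {
	i := 0
	for i < len(line) && i < 3 && line[i] == ' ' {
		i++ // skip up to three leading spaces
	}
	if i >= len(line) || (line[i] != '`' && line[i] != '~') {
		return "", "", false
	}
	c := line[i]
	start := i
	for i < len(line) && line[i] == c {
		i++
	}
	if i-start < 3 {
		return "", "", false // the marker char must occur at least 3 times
	}
	marker = line[start:i]
	// whatever follows, minus surrounding spaces, is the info string
	for i < len(line) && line[i] == ' ' {
		i++
	}
	j := len(line)
	for j > i && line[j-1] == ' ' {
		j--
	}
	return marker, line[i:j], true
}

func main() {
	m, info, ok := parseFence("``` go")
	fmt.Println(m, info, ok) // ``` go true
}
```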
[… block.go, third part: fencedCodeBlock, which copies lines verbatim until the matching closing fence and, under the Mmark extension, wraps the block in an ast.CaptionFigure when a "Figure: " caption follows; finalizeCodeBlock, which splits the info string from the literal; quotePrefix, terminateBlockquote and quote for ">" block quotes, with the analogous "Quote: " caption handling; codePrefix/code for tab- or four-space-indented code; uliPrefix, oliPrefix and dliPrefix, the unordered ("*", "+", "-"), ordered ("1." or "1)") and definition (": ") list-item recognizers; list(), listItem() and finalizeList, which gather item lines, track indentation, splice in sublists and decide tight versus loose rendering; renderParagraph; blockMath for $$-delimited display math; and paragraph(), which scans forward until a reference, a blank line, a setext underline or any other block construct terminates the paragraph.]
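The list recognizers reduce to a couple of byte scans. A merged sketch of uliPrefix/oliPrefix, combined into one hypothetical helper (the real code keeps them separate and also handles definition-list ": " prefixes):

```go
package main

import "fmt"

// listPrefix returns the length of an unordered ("* ", "+ ", "- ") or
// ordered ("12. ", "3) ") list-item prefix after up to three leading
// spaces, or 0 if the line starts no list item.
func listPrefix(data []byte) int {
	i := 0
	for i < 3 && i < len(data) && data[i] == ' ' {
		i++
	}
	if i+1 >= len(data) {
		return 0
	}
	// unordered: one bullet char followed by a space or tab
	if data[i] == '*' || data[i] == '+' || data[i] == '-' {
		if data[i+1] == ' ' || data[i+1] == '\t' {
			return i + 2
		}
		return 0
	}
	// ordered: one or more digits, then '.' or ')', then a space or tab
	start := i
	for i < len(data) && data[i] >= '0' && data[i] <= '9' {
		i++
	}
	if i == start || i+1 >= len(data) {
		return 0
	}
	if (data[i] == '.' || data[i] == ')') && (data[i+1] == ' ' || data[i+1] == '\t') {
		return i + 2
	}
	return 0
}

func main() {
	fmt.Println(listPrefix([]byte("  2. item"))) // 5
	fmt.Println(listPrefix([]byte("* item")))    // 2
	fmt.Println(listPrefix([]byte("2x item")))   // 0
}
```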
- n := len(data) - for i < n && max > 0 && data[i] == c { - i++ - max-- - } - return i -} - -// skipUntilChar advances i as long as data[i] != c -func skipUntilChar(data []byte, i int, c byte) int { - n := len(data) - for i < n && data[i] != c { - i++ - } - return i -} - -func skipAlnum(data []byte, i int) int { - n := len(data) - for i < n && IsAlnum(data[i]) { - i++ - } - return i -} - -func skipSpace(data []byte, i int) int { - n := len(data) - for i < n && IsSpace(data[i]) { - i++ - } - return i -} - -func backChar(data []byte, i int, c byte) int { - for i > 0 && data[i-1] == c { - i-- - } - return i -} - -func backUntilChar(data []byte, i int, c byte) int { - for i > 0 && data[i-1] != c { - i-- - } - return i -} diff --git a/vendor/github.com/gomarkdown/markdown/parser/block_table.go b/vendor/github.com/gomarkdown/markdown/parser/block_table.go deleted file mode 100644 index fa8efdf26c..0000000000 --- a/vendor/github.com/gomarkdown/markdown/parser/block_table.go +++ /dev/null @@ -1,328 +0,0 @@ -package parser - -import "github.com/gomarkdown/markdown/ast" - -// check if the specified position is preceded by an odd number of backslashes -func isBackslashEscaped(data []byte, i int) bool { - backslashes := 0 - for i-backslashes-1 >= 0 && data[i-backslashes-1] == '\\' { - backslashes++ - } - return backslashes&1 == 1 -} - -func (p *Parser) tableRow(data []byte, columns []ast.CellAlignFlags, header bool) { - p.AddBlock(&ast.TableRow{}) - col := 0 - - i := skipChar(data, 0, '|') - - n := len(data) - colspans := 0 // keep track of total colspan in this row. - for col = 0; col < len(columns) && i < n; col++ { - colspan := 0 - i = skipChar(data, i, ' ') - - cellStart := i - - // If we are in a codespan we should discount any | we see, check for that here and skip ahead. - if isCode, _ := codeSpan(p, data[i:], 0); isCode > 0 { - i += isCode - 1 - } - - for i < n && (data[i] != '|' || isBackslashEscaped(data, i)) && data[i] != '\n' { - i++ - } - - cellEnd := i - - // skip the end-of-cell marker, possibly taking us past end of buffer - // each _extra_ | means a colspan - for i < len(data) && data[i] == '|' && !isBackslashEscaped(data, i) { - i++ - colspan++ - } - // only colspan > 1 make sense. - if colspan < 2 { - colspan = 0 - } - - for cellEnd > cellStart && cellEnd-1 < n && data[cellEnd-1] == ' ' { - cellEnd-- - } - - block := &ast.TableCell{ - IsHeader: header, - Align: columns[col], - ColSpan: colspan, - } - block.Content = data[cellStart:cellEnd] - if cellStart == cellEnd && colspans > 0 { - // an empty cell that we should ignore, it exists because of colspan - colspans-- - } else { - p.AddBlock(block) - } - - if colspan > 0 { - colspans += colspan - 1 - } - } - - // pad it out with empty columns to get the right number - for ; col < len(columns); col++ { - block := &ast.TableCell{ - IsHeader: header, - Align: columns[col], - } - p.AddBlock(block) - } - - // silently ignore rows with too many cells -} - -// tableFooter parses the (optional) table footer. -func (p *Parser) tableFooter(data []byte) bool { - colCount := 1 - - // ignore up to 3 spaces - n := len(data) - i := skipCharN(data, 0, ' ', 3) - for ; i < n && data[i] != '\n'; i++ { - // If we are in a codespan we should discount any | we see, check for that here and skip ahead. 
- if isCode, _ := codeSpan(p, data[i:], 0); isCode > 0 { - i += isCode - 1 - } - - if data[i] == '|' && !isBackslashEscaped(data, i) { - colCount++ - continue - } - // remaining data must be the = character - if data[i] != '=' { - return false - } - } - - // doesn't look like a table footer - if colCount == 1 { - return false - } - - p.AddBlock(&ast.TableFooter{}) - - return true -} - -// tableHeaders parses the header. If recognized it will also add a table. -func (p *Parser) tableHeader(data []byte, doRender bool) (size int, columns []ast.CellAlignFlags, table ast.Node) { - i := 0 - colCount := 1 - headerIsUnderline := true - headerIsWithEmptyFields := true - for i = 0; i < len(data) && data[i] != '\n'; i++ { - // If we are in a codespan we should discount any | we see, check for that here and skip ahead. - if isCode, _ := codeSpan(p, data[i:], 0); isCode > 0 { - i += isCode - 1 - } - - if data[i] == '|' && !isBackslashEscaped(data, i) { - colCount++ - } - if data[i] != '-' && data[i] != ' ' && data[i] != ':' && data[i] != '|' { - headerIsUnderline = false - } - if data[i] != ' ' && data[i] != '|' { - headerIsWithEmptyFields = false - } - } - - // doesn't look like a table header - if colCount == 1 { - return - } - - // include the newline in the data sent to tableRow - j := skipCharN(data, i, '\n', 1) - header := data[:j] - - // column count ignores pipes at beginning or end of line - if data[0] == '|' { - colCount-- - } - { - tmp := header - // remove whitespace from the end - for len(tmp) > 0 { - lastIdx := len(tmp) - 1 - if tmp[lastIdx] == '\n' || tmp[lastIdx] == ' ' { - tmp = tmp[:lastIdx] - } else { - break - } - } - n := len(tmp) - if n > 2 && tmp[n-1] == '|' && !isBackslashEscaped(tmp, n-1) { - colCount-- - } - } - - // if the header looks like a underline, then we omit the header - // and parse the first line again as underline - if headerIsUnderline && !headerIsWithEmptyFields { - header = nil - i = 0 - } else { - i++ // move past newline - } - - columns = make([]ast.CellAlignFlags, colCount) - - // move on to the header underline - if i >= len(data) { - return - } - - if data[i] == '|' && !isBackslashEscaped(data, i) { - i++ - } - i = skipChar(data, i, ' ') - - // each column header is of form: / *:?-+:? 
*|/ with # dashes + # colons >= 3 - // and trailing | optional on last column - col := 0 - n := len(data) - for i < n && data[i] != '\n' { - dashes := 0 - - if data[i] == ':' { - i++ - columns[col] |= ast.TableAlignmentLeft - dashes++ - } - for i < n && data[i] == '-' { - i++ - dashes++ - } - if i < n && data[i] == ':' { - i++ - columns[col] |= ast.TableAlignmentRight - dashes++ - } - for i < n && data[i] == ' ' { - i++ - } - if i == n { - return - } - // end of column test is messy - switch { - case dashes < 1: - // not a valid column - return - - case data[i] == '|' && !isBackslashEscaped(data, i): - // marker found, now skip past trailing whitespace - col++ - i++ - for i < n && data[i] == ' ' { - i++ - } - - // trailing junk found after last column - if col >= colCount && i < len(data) && data[i] != '\n' { - return - } - - case (data[i] != '|' || isBackslashEscaped(data, i)) && col+1 < colCount: - // something else found where marker was required - return - - case data[i] == '\n': - // marker is optional for the last column - col++ - - default: - // trailing junk found after last column - return - } - } - if col != colCount { - return - } - - if doRender { - table = &ast.Table{} - p.AddBlock(table) - if header != nil { - p.AddBlock(&ast.TableHeader{}) - p.tableRow(header, columns, true) - } - } - size = skipCharN(data, i, '\n', 1) - return -} - -/* -Table: - -Name | Age | Phone -------|-----|--------- -Bob | 31 | 555-1234 -Alice | 27 | 555-4321 -*/ -func (p *Parser) table(data []byte) int { - i, columns, table := p.tableHeader(data, true) - if i == 0 { - return 0 - } - - p.AddBlock(&ast.TableBody{}) - - for i < len(data) { - pipes, rowStart := 0, i - for ; i < len(data) && data[i] != '\n'; i++ { - if data[i] == '|' { - pipes++ - } - } - - if pipes == 0 { - i = rowStart - break - } - - // include the newline in data sent to tableRow - i = skipCharN(data, i, '\n', 1) - - if p.tableFooter(data[rowStart:i]) { - continue - } - - p.tableRow(data[rowStart:i], columns, false) - } - if captionContent, id, consumed := p.caption(data[i:], []byte(captionTable)); consumed > 0 { - caption := &ast.Caption{} - p.Inline(caption, captionContent) - - // Some switcheroo to re-insert the parsed table as a child of the captionfigure. - figure := &ast.CaptionFigure{} - figure.HeadingID = id - table2 := &ast.Table{} - // Retain any block level attributes. - table2.AsContainer().Attribute = table.AsContainer().Attribute - children := table.GetChildren() - ast.RemoveFromTree(table) - - table2.SetChildren(children) - ast.AppendChild(figure, table2) - ast.AppendChild(figure, caption) - - p.addChild(figure) - p.Finalize(figure) - - i += consumed - } - - return i -} diff --git a/vendor/github.com/gomarkdown/markdown/parser/callout.go b/vendor/github.com/gomarkdown/markdown/parser/callout.go deleted file mode 100644 index 15858aa971..0000000000 --- a/vendor/github.com/gomarkdown/markdown/parser/callout.go +++ /dev/null @@ -1,29 +0,0 @@ -package parser - -import ( - "bytes" - "strconv" -) - -// IsCallout detects a callout in the following format: <> Where N is a integer > 0. 
-func IsCallout(data []byte) (id []byte, consumed int) { - if !bytes.HasPrefix(data, []byte("<<")) { - return nil, 0 - } - start := 2 - end := bytes.Index(data[start:], []byte(">>")) - if end < 0 { - return nil, 0 - } - - b := data[start : start+end] - b = bytes.TrimSpace(b) - i, err := strconv.Atoi(string(b)) - if err != nil { - return nil, 0 - } - if i <= 0 { - return nil, 0 - } - return b, start + end + 2 // 2 for >> -} diff --git a/vendor/github.com/gomarkdown/markdown/parser/caption.go b/vendor/github.com/gomarkdown/markdown/parser/caption.go deleted file mode 100644 index 0879450464..0000000000 --- a/vendor/github.com/gomarkdown/markdown/parser/caption.go +++ /dev/null @@ -1,70 +0,0 @@ -package parser - -import ( - "bytes" -) - -// caption checks for a caption, it returns the caption data and a potential "headingID". -func (p *Parser) caption(data, caption []byte) ([]byte, string, int) { - if !bytes.HasPrefix(data, caption) { - return nil, "", 0 - } - j := len(caption) - data = data[j:] - end := LinesUntilEmpty(data) - - data = data[:end] - - id, start := captionID(data) - if id != "" { - return data[:start], id, end + j - } - - return data, "", end + j -} - -// LinesUntilEmpty scans lines up to the first empty line. -func LinesUntilEmpty(data []byte) int { - line, i := 0, 0 - - for line < len(data) { - i++ - - // find the end of this line - for i < len(data) && data[i-1] != '\n' { - i++ - } - - if IsEmpty(data[line:i]) == 0 { - line = i - continue - } - - break - } - return i -} - -// captionID checks if the caption *ends* in {#....}. If so the text after {# is taken to be -// the ID/anchor of the entire figure block. -func captionID(data []byte) (string, int) { - end := len(data) - - j, k := 0, 0 - // find start/end of heading id - for j = 0; j < end-1 && (data[j] != '{' || data[j+1] != '#'); j++ { - } - for k = j + 1; k < end && data[k] != '}'; k++ { - } - // remains must be whitespace. - for l := k + 1; l < end; l++ { - if !IsSpace(data[l]) { - return "", 0 - } - } - - if j > 0 && k > 0 && j+2 < k { - return string(data[j+2 : k]), j - } - return "", 0 -} diff --git a/vendor/github.com/gomarkdown/markdown/parser/citation.go b/vendor/github.com/gomarkdown/markdown/parser/citation.go deleted file mode 100644 index 217d4def0b..0000000000 --- a/vendor/github.com/gomarkdown/markdown/parser/citation.go +++ /dev/null @@ -1,91 +0,0 @@ -package parser - -import ( - "bytes" - - "github.com/gomarkdown/markdown/ast" -) - -// citation parses a citation. In its most simple form [@ref], we allow multiple -// being separated by semicolons and a sub reference inside ala pandoc: [@ref, p. 23]. -// Each citation can have a modifier: !, ? or - wich mean: -// -// ! - normative -// ? - formative -// - - suppressed -// -// The suffix starts after a comma, we strip any whitespace before and after. If the output -// allows for it, this can be rendered. -func citation(p *Parser, data []byte, offset int) (int, ast.Node) { - // look for the matching closing bracket - i := offset + 1 - for level := 1; level > 0 && i < len(data); i++ { - switch { - case data[i] == '\n': - // no newlines allowed. 
- return 0, nil - - case data[i-1] == '\\': - continue - - case data[i] == '[': - level++ - - case data[i] == ']': - level-- - if level <= 0 { - i-- // compensate for extra i++ in for loop - } - } - } - - if i >= len(data) { - return 0, nil - } - - node := &ast.Citation{} - - citations := bytes.Split(data[1:i], []byte(";")) - for _, citation := range citations { - var suffix []byte - citation = bytes.TrimSpace(citation) - j := 0 - if citation[j] != '@' { - // not a citation, drop out entirely. - return 0, nil - } - if c := bytes.Index(citation, []byte(",")); c > 0 { - part := citation[:c] - suff := citation[c+1:] - part = bytes.TrimSpace(part) - suff = bytes.TrimSpace(suff) - - citation = part - suffix = suff - } - - citeType := ast.CitationTypeInformative - - if len(citation) < 2 { - continue - } - - j = 1 - switch citation[j] { - case '!': - citeType = ast.CitationTypeNormative - j++ - case '?': - citeType = ast.CitationTypeInformative - j++ - case '-': - citeType = ast.CitationTypeSuppressed - j++ - } - node.Destination = append(node.Destination, citation[j:]) - node.Type = append(node.Type, citeType) - node.Suffix = append(node.Suffix, suffix) - } - - return i + 1, node -} diff --git a/vendor/github.com/gomarkdown/markdown/parser/esc.go b/vendor/github.com/gomarkdown/markdown/parser/esc.go deleted file mode 100644 index 0a79aa35ec..0000000000 --- a/vendor/github.com/gomarkdown/markdown/parser/esc.go +++ /dev/null @@ -1,20 +0,0 @@ -package parser - -// isEscape returns true if byte i is prefixed by an odd number of backslahses. -func isEscape(data []byte, i int) bool { - if i == 0 { - return false - } - if i == 1 { - return data[0] == '\\' - } - j := i - 1 - for ; j >= 0; j-- { - if data[j] != '\\' { - break - } - } - j++ - // odd number of backslahes means escape - return (i-j)%2 != 0 -} diff --git a/vendor/github.com/gomarkdown/markdown/parser/figures.go b/vendor/github.com/gomarkdown/markdown/parser/figures.go deleted file mode 100644 index 0566c16ecd..0000000000 --- a/vendor/github.com/gomarkdown/markdown/parser/figures.go +++ /dev/null @@ -1,117 +0,0 @@ -package parser - -import ( - "bytes" - - "github.com/gomarkdown/markdown/ast" -) - -// sFigureLine checks if there's a figure line (e.g., !--- ) at the beginning of data, -// and returns the end index if so, or 0 otherwise. -func sFigureLine(data []byte, oldmarker string) (end int, marker string) { - i, size := 0, 0 - - n := len(data) - // skip up to three spaces - for i < n && i < 3 && data[i] == ' ' { - i++ - } - - // check for the marker characters: ! - if i+1 >= n { - return 0, "" - } - if data[i] != '!' || data[i+1] != '-' { - return 0, "" - } - i++ - - c := data[i] // i.e. the - - - // the whole line must be the same char or whitespace - for i < n && data[i] == c { - size++ - i++ - } - - // the marker char must occur at least 3 times - if size < 3 { - return 0, "" - } - marker = string(data[i-size : i]) - - // if this is the end marker, it must match the beginning marker - if oldmarker != "" && marker != oldmarker { - return 0, "" - } - - // there is no syntax modifier although it might be an idea to re-use this space for something? - - i = skipChar(data, i, ' ') - if i >= n || data[i] != '\n' { - if i == n { - return i, marker - } - return 0, "" - } - return i + 1, marker // Take newline into account. -} - -// figureBlock returns the end index if data contains a figure block at the beginning, -// or 0 otherwise. It writes to out if doRender is true, otherwise it has no side effects. 
-// If doRender is true, a final newline is mandatory to recognize the figure block. -func (p *Parser) figureBlock(data []byte, doRender bool) int { - beg, marker := sFigureLine(data, "") - if beg == 0 || beg >= len(data) { - return 0 - } - - var raw bytes.Buffer - - for { - // safe to assume beg < len(data) - - // check for the end of the code block - figEnd, _ := sFigureLine(data[beg:], marker) - if figEnd != 0 { - beg += figEnd - break - } - - // copy the current line - end := skipUntilChar(data, beg, '\n') + 1 - - // did we reach the end of the buffer without a closing marker? - if end >= len(data) { - return 0 - } - - // verbatim copy to the working buffer - if doRender { - raw.Write(data[beg:end]) - } - beg = end - } - - if !doRender { - return beg - } - - figure := &ast.CaptionFigure{} - p.AddBlock(figure) - p.Block(raw.Bytes()) - - defer p.Finalize(figure) - - if captionContent, id, consumed := p.caption(data[beg:], []byte("Figure: ")); consumed > 0 { - caption := &ast.Caption{} - p.Inline(caption, captionContent) - - figure.HeadingID = id - - p.addChild(caption) - - beg += consumed - } - return beg -} diff --git a/vendor/github.com/gomarkdown/markdown/parser/include.go b/vendor/github.com/gomarkdown/markdown/parser/include.go deleted file mode 100644 index 2448a68543..0000000000 --- a/vendor/github.com/gomarkdown/markdown/parser/include.go +++ /dev/null @@ -1,129 +0,0 @@ -package parser - -import ( - "bytes" - "path" - "path/filepath" -) - -// isInclude parses {{...}}[...], that contains a path between the {{, the [...] syntax contains -// an address to select which lines to include. It is treated as an opaque string and just given -// to readInclude. -func (p *Parser) isInclude(data []byte) (filename string, address []byte, consumed int) { - i := skipCharN(data, 0, ' ', 3) // start with up to 3 spaces - if len(data[i:]) < 3 { - return "", nil, 0 - } - if data[i] != '{' || data[i+1] != '{' { - return "", nil, 0 - } - start := i + 2 - - // find the end delimiter - i = skipUntilChar(data, i, '}') - if i+1 >= len(data) { - return "", nil, 0 - } - end := i - i++ - if data[i] != '}' { - return "", nil, 0 - } - filename = string(data[start:end]) - - if i+1 < len(data) && data[i+1] == '[' { // potential address specification - start := i + 2 - - end = skipUntilChar(data, start, ']') - if end >= len(data) { - return "", nil, 0 - } - address = data[start:end] - return filename, address, end + 1 - } - - return filename, address, i + 1 -} - -func (p *Parser) readInclude(from, file string, address []byte) []byte { - if p.Opts.ReadIncludeFn != nil { - return p.Opts.ReadIncludeFn(from, file, address) - } - - return nil -} - -// isCodeInclude parses <{{...}} which is similar to isInclude the returned bytes are, however wrapped in a code block. -func (p *Parser) isCodeInclude(data []byte) (filename string, address []byte, consumed int) { - i := skipCharN(data, 0, ' ', 3) // start with up to 3 spaces - if len(data[i:]) < 3 { - return "", nil, 0 - } - if data[i] != '<' { - return "", nil, 0 - } - start := i - - filename, address, consumed = p.isInclude(data[i+1:]) - if consumed == 0 { - return "", nil, 0 - } - return filename, address, start + consumed + 1 -} - -// readCodeInclude acts like include except the returned bytes are wrapped in a fenced code block. 
-func (p *Parser) readCodeInclude(from, file string, address []byte) []byte { - data := p.readInclude(from, file, address) - if data == nil { - return nil - } - ext := path.Ext(file) - buf := &bytes.Buffer{} - buf.Write([]byte("```")) - if ext != "" { // starts with a dot - buf.WriteString(" " + ext[1:] + "\n") - } else { - buf.WriteByte('\n') - } - buf.Write(data) - buf.WriteString("```\n") - return buf.Bytes() -} - -// incStack hold the current stack of chained includes. Each value is the containing -// path of the file being parsed. -type incStack struct { - stack []string -} - -func newIncStack() *incStack { - return &incStack{stack: []string{}} -} - -// Push updates i with new. -func (i *incStack) Push(new string) { - if path.IsAbs(new) { - i.stack = append(i.stack, path.Dir(new)) - return - } - last := "" - if len(i.stack) > 0 { - last = i.stack[len(i.stack)-1] - } - i.stack = append(i.stack, path.Dir(filepath.Join(last, new))) -} - -// Pop pops the last value. -func (i *incStack) Pop() { - if len(i.stack) == 0 { - return - } - i.stack = i.stack[:len(i.stack)-1] -} - -func (i *incStack) Last() string { - if len(i.stack) == 0 { - return "" - } - return i.stack[len(i.stack)-1] -} diff --git a/vendor/github.com/gomarkdown/markdown/parser/inline.go b/vendor/github.com/gomarkdown/markdown/parser/inline.go deleted file mode 100644 index d6c8a7204b..0000000000 --- a/vendor/github.com/gomarkdown/markdown/parser/inline.go +++ /dev/null @@ -1,1323 +0,0 @@ -package parser - -import ( - "bytes" - "regexp" - "strconv" - - "github.com/gomarkdown/markdown/ast" -) - -// Parsing of inline elements - -var ( - urlRe = `((https?|ftp):\/\/|\/)[-A-Za-z0-9+&@#\/%?=~_|!:,.;\(\)]+` - anchorRe = regexp.MustCompile(`^(]+")?\s?>` + urlRe + `<\/a>)`) - - // TODO: improve this regexp to catch all possible entities: - htmlEntityRe = regexp.MustCompile(`&[a-z]{2,5};`) -) - -// Inline parses text within a block. -// Each function returns the number of consumed chars. -func (p *Parser) Inline(currBlock ast.Node, data []byte) { - // handlers might call us recursively: enforce a maximum depth - if p.nesting >= p.maxNesting || len(data) == 0 { - return - } - p.nesting++ - beg, end := 0, 0 - - n := len(data) - for end < n { - handler := p.inlineCallback[data[end]] - if handler == nil { - end++ - continue - } - consumed, node := handler(p, data, end) - if consumed == 0 { - // no action from the callback - end++ - continue - } - // copy inactive chars into the output - ast.AppendChild(currBlock, newTextNode(data[beg:end])) - if node != nil { - ast.AppendChild(currBlock, node) - } - beg = end + consumed - end = beg - } - - if beg < n { - if data[end-1] == '\n' { - end-- - } - ast.AppendChild(currBlock, newTextNode(data[beg:end])) - } - p.nesting-- -} - -// single and double emphasis parsing -func emphasis(p *Parser, data []byte, offset int) (int, ast.Node) { - data = data[offset:] - c := data[0] - - n := len(data) - if n > 2 && data[1] != c { - // whitespace cannot follow an opening emphasis; - // strikethrough only takes two characters '~~' - if IsSpace(data[1]) { - return 0, nil - } - if p.extensions&SuperSubscript != 0 && c == '~' { - // potential subscript, no spaces, except when escaped, helperEmphasis does - // not check that for us, so walk the bytes and check. - ret := skipUntilChar(data[1:], 0, c) - if ret == 0 { - return 0, nil - } - ret++ // we started with data[1:] above. 
- for i := 1; i < ret; i++ { - if IsSpace(data[i]) && !isEscape(data, i) { - return 0, nil - } - } - sub := &ast.Subscript{} - sub.Literal = data[1:ret] - return ret + 1, sub - } - ret, node := helperEmphasis(p, data[1:], c) - if ret == 0 { - return 0, nil - } - - return ret + 1, node - } - - if n > 3 && data[1] == c && data[2] != c { - if IsSpace(data[2]) { - return 0, nil - } - ret, node := helperDoubleEmphasis(p, data[2:], c) - if ret == 0 { - return 0, nil - } - - return ret + 2, node - } - - if n > 4 && data[1] == c && data[2] == c && data[3] != c { - if c == '~' || IsSpace(data[3]) { - return 0, nil - } - ret, node := helperTripleEmphasis(p, data, 3, c) - if ret == 0 { - return 0, nil - } - - return ret + 3, node - } - - return 0, nil -} - -func codeSpan(p *Parser, data []byte, offset int) (int, ast.Node) { - data = data[offset:] - - // count the number of backticks in the delimiter - nb := skipChar(data, 0, '`') - - // find the next delimiter - i, end := 0, 0 - hasLFBeforeDelimiter := false - for end = nb; end < len(data) && i < nb; end++ { - if data[end] == '\n' { - hasLFBeforeDelimiter = true - } - if data[end] == '`' { - i++ - } else { - i = 0 - } - } - - // no matching delimiter? - if i < nb && end >= len(data) { - return 0, nil - } - - // If there are non-space chars after the ending delimiter and before a '\n', - // flag that this is not a well formed fenced code block. - hasCharsAfterDelimiter := false - for j := end; j < len(data); j++ { - if data[j] == '\n' { - break - } - if !IsSpace(data[j]) { - hasCharsAfterDelimiter = true - break - } - } - - // trim outside whitespace - fBegin := nb - for fBegin < end && data[fBegin] == ' ' { - fBegin++ - } - - fEnd := end - nb - for fEnd > fBegin && data[fEnd-1] == ' ' { - fEnd-- - } - - if fBegin == fEnd { - return end, nil - } - - // if delimiter has 3 backticks - if nb == 3 { - i := fBegin - syntaxStart, syntaxLen := syntaxRange(data, &i) - - // If we found a '\n' before the end marker and there are only spaces - // after the end marker, then this is a code block. - if hasLFBeforeDelimiter && !hasCharsAfterDelimiter { - codeblock := &ast.CodeBlock{ - IsFenced: true, - Info: data[syntaxStart : syntaxStart+syntaxLen], - } - codeblock.Literal = data[i:fEnd] - return end, codeblock - } - } - - // render the code span - code := &ast.Code{} - code.Literal = data[fBegin:fEnd] - return end, code -} - -// newline preceded by two spaces becomes
    -func maybeLineBreak(p *Parser, data []byte, offset int) (int, ast.Node) { - origOffset := offset - offset = skipChar(data, offset, ' ') - - if offset < len(data) && data[offset] == '\n' { - if offset-origOffset >= 2 { - return offset - origOffset + 1, &ast.Hardbreak{} - } - return offset - origOffset, nil - } - return 0, nil -} - -// newline without two spaces works when HardLineBreak is enabled -func lineBreak(p *Parser, data []byte, offset int) (int, ast.Node) { - if p.extensions&HardLineBreak != 0 { - return 1, &ast.Hardbreak{} - } - return 0, nil -} - -type linkType int - -const ( - linkNormal linkType = iota - linkImg - linkDeferredFootnote - linkInlineFootnote - linkCitation -) - -func isReferenceStyleLink(data []byte, pos int, t linkType) bool { - if t == linkDeferredFootnote { - return false - } - return pos < len(data)-1 && data[pos] == '[' && data[pos+1] != '^' -} - -func maybeImage(p *Parser, data []byte, offset int) (int, ast.Node) { - if offset < len(data)-1 && data[offset+1] == '[' { - return link(p, data, offset) - } - return 0, nil -} - -func maybeInlineFootnoteOrSuper(p *Parser, data []byte, offset int) (int, ast.Node) { - if offset < len(data)-1 && data[offset+1] == '[' { - return link(p, data, offset) - } - - if p.extensions&SuperSubscript != 0 { - ret := skipUntilChar(data[offset:], 1, '^') - if ret == 0 { - return 0, nil - } - for i := offset; i < offset+ret; i++ { - if IsSpace(data[i]) && !isEscape(data, i) { - return 0, nil - } - } - sup := &ast.Superscript{} - sup.Literal = data[offset+1 : offset+ret] - return ret + 1, sup - } - - return 0, nil -} - -// '[': parse a link or an image or a footnote or a citation -func link(p *Parser, data []byte, offset int) (int, ast.Node) { - // no links allowed inside regular links, footnote, and deferred footnotes - if p.insideLink && (offset > 0 && data[offset-1] == '[' || len(data)-1 > offset && data[offset+1] == '^') { - return 0, nil - } - - var t linkType - switch { - // special case: ![^text] == deferred footnote (that follows something with - // an exclamation point) - case p.extensions&Footnotes != 0 && len(data)-1 > offset && data[offset+1] == '^': - t = linkDeferredFootnote - // ![alt] == image - case offset >= 0 && data[offset] == '!': - t = linkImg - offset++ - // [@citation], [@-citation], [@?citation], [@!citation] - case p.extensions&Mmark != 0 && len(data)-1 > offset && data[offset+1] == '@': - t = linkCitation - // [text] == regular link - // ^[text] == inline footnote - // [^refId] == deferred footnote - case p.extensions&Footnotes != 0: - if offset >= 0 && data[offset] == '^' { - t = linkInlineFootnote - offset++ - } else if len(data)-1 > offset && data[offset+1] == '^' { - t = linkDeferredFootnote - } - default: - t = linkNormal - } - - data = data[offset:] - - if t == linkCitation { - return citation(p, data, 0) - } - - var ( - i = 1 - noteID int - title, link, linkID, altContent []byte - textHasNl = false - ) - - if t == linkDeferredFootnote { - i++ - } - - // look for the matching closing bracket - for level := 1; level > 0 && i < len(data); i++ { - switch { - case data[i] == '\n': - textHasNl = true - - case data[i-1] == '\\': - continue - - case data[i] == '[': - level++ - - case data[i] == ']': - level-- - if level <= 0 { - i-- // compensate for extra i++ in for loop - } - } - } - - if i >= len(data) { - return 0, nil - } - - txtE := i - i++ - var footnoteNode ast.Node - - // skip any amount of whitespace or newline - // (this is much more lax than original markdown syntax) - i = skipSpace(data, i) - 
- // inline style link - switch { - case i < len(data) && data[i] == '(': - // skip initial whitespace - i++ - - i = skipSpace(data, i) - - linkB := i - brace := 0 - - // look for link end: ' " ) - findlinkend: - for i < len(data) { - switch { - case data[i] == '\\': - i += 2 - - case data[i] == '(': - brace++ - i++ - - case data[i] == ')': - if brace <= 0 { - break findlinkend - } - brace-- - i++ - - case data[i] == '\'' || data[i] == '"': - break findlinkend - - default: - i++ - } - } - - if i >= len(data) { - return 0, nil - } - linkE := i - - // look for title end if present - titleB, titleE := 0, 0 - if data[i] == '\'' || data[i] == '"' { - i++ - titleB = i - titleEndCharFound := false - - findtitleend: - for i < len(data) { - switch { - case data[i] == '\\': - i++ - - case data[i] == data[titleB-1]: // matching title delimiter - titleEndCharFound = true - - case titleEndCharFound && data[i] == ')': - break findtitleend - } - i++ - } - - if i >= len(data) { - return 0, nil - } - - // skip whitespace after title - titleE = i - 1 - for titleE > titleB && IsSpace(data[titleE]) { - titleE-- - } - - // check for closing quote presence - if data[titleE] != '\'' && data[titleE] != '"' { - titleB, titleE = 0, 0 - linkE = i - } - } - - // remove whitespace at the end of the link - for linkE > linkB && IsSpace(data[linkE-1]) { - linkE-- - } - - // remove optional angle brackets around the link - if data[linkB] == '<' { - linkB++ - } - if data[linkE-1] == '>' { - linkE-- - } - - // build escaped link and title - if linkE > linkB { - link = data[linkB:linkE] - } - - if titleE > titleB { - title = data[titleB:titleE] - } - - i++ - - // reference style link - case isReferenceStyleLink(data, i, t): - var id []byte - altContentConsidered := false - - // look for the id - i++ - linkB := i - i = skipUntilChar(data, i, ']') - - if i >= len(data) { - return 0, nil - } - linkE := i - - // find the reference - if linkB == linkE { - if textHasNl { - var b bytes.Buffer - - for j := 1; j < txtE; j++ { - switch { - case data[j] != '\n': - b.WriteByte(data[j]) - case data[j-1] != ' ': - b.WriteByte(' ') - } - } - - id = b.Bytes() - } else { - id = data[1:txtE] - altContentConsidered = true - } - } else { - id = data[linkB:linkE] - } - - // find the reference with matching id - lr, ok := p.getRef(string(id)) - if !ok { - return 0, nil - } - - // keep link and title from reference - linkID = id - link = lr.link - title = lr.title - if altContentConsidered { - altContent = lr.text - } - i++ - - // shortcut reference style link or reference or inline footnote - default: - var id []byte - - // craft the id - if textHasNl { - var b bytes.Buffer - - for j := 1; j < txtE; j++ { - switch { - case data[j] != '\n': - b.WriteByte(data[j]) - case data[j-1] != ' ': - b.WriteByte(' ') - } - } - - id = b.Bytes() - } else { - if t == linkDeferredFootnote { - id = data[2:txtE] // get rid of the ^ - } else { - id = data[1:txtE] - } - } - - footnoteNode = &ast.ListItem{} - if t == linkInlineFootnote { - // create a new reference - noteID = len(p.notes) + 1 - - var fragment []byte - if len(id) > 0 { - if len(id) < 16 { - fragment = make([]byte, len(id)) - } else { - fragment = make([]byte, 16) - } - copy(fragment, slugify(id)) - } else { - fragment = append([]byte("footnote-"), []byte(strconv.Itoa(noteID))...) 
- } - - ref := &reference{ - noteID: noteID, - hasBlock: false, - link: fragment, - title: id, - footnote: footnoteNode, - } - - p.notes = append(p.notes, ref) - p.refsRecord[string(ref.link)] = struct{}{} - - link = ref.link - title = ref.title - } else { - // find the reference with matching id - lr, ok := p.getRef(string(id)) - if !ok { - return 0, nil - } - - if t == linkDeferredFootnote && !p.isFootnote(lr) { - lr.noteID = len(p.notes) + 1 - lr.footnote = footnoteNode - p.notes = append(p.notes, lr) - p.refsRecord[string(lr.link)] = struct{}{} - } - - // keep link and title from reference - link = lr.link - // if inline footnote, title == footnote contents - title = lr.title - noteID = lr.noteID - if len(lr.text) > 0 { - altContent = lr.text - } - } - - // rewind the whitespace - i = txtE + 1 - } - - var uLink []byte - if t == linkNormal || t == linkImg { - if len(link) > 0 { - var uLinkBuf bytes.Buffer - unescapeText(&uLinkBuf, link) - uLink = uLinkBuf.Bytes() - } - - // links need something to click on and somewhere to go - // [](http://bla) is legal in CommonMark, so allow txtE <=1 for linkNormal - // [bla]() is also legal in CommonMark, so allow empty uLink - } - - // call the relevant rendering function - switch t { - case linkNormal: - link := &ast.Link{ - Destination: normalizeURI(uLink), - Title: title, - DeferredID: linkID, - } - if len(altContent) > 0 { - ast.AppendChild(link, newTextNode(altContent)) - } else { - // links cannot contain other links, so turn off link parsing - // temporarily and recurse - insideLink := p.insideLink - p.insideLink = true - p.Inline(link, data[1:txtE]) - p.insideLink = insideLink - } - return i, link - - case linkImg: - image := &ast.Image{ - Destination: uLink, - Title: title, - } - ast.AppendChild(image, newTextNode(data[1:txtE])) - return i + 1, image - - case linkInlineFootnote, linkDeferredFootnote: - link := &ast.Link{ - Destination: link, - Title: title, - NoteID: noteID, - Footnote: footnoteNode, - } - if t == linkDeferredFootnote { - link.DeferredID = data[2:txtE] - } - if t == linkInlineFootnote { - i++ - } - return i, link - - default: - return 0, nil - } -} - -func (p *Parser) inlineHTMLComment(data []byte) int { - if len(data) < 5 { - return 0 - } - if data[0] != '<' || data[1] != '!' || data[2] != '-' || data[3] != '-' { - return 0 - } - i := 5 - // scan for an end-of-comment marker, across lines if necessary - for i < len(data) && !(data[i-2] == '-' && data[i-1] == '-' && data[i] == '>') { - i++ - } - // no end-of-comment marker - if i >= len(data) { - return 0 - } - return i + 1 -} - -func stripMailto(link []byte) []byte { - if bytes.HasPrefix(link, []byte("mailto://")) { - return link[9:] - } else if bytes.HasPrefix(link, []byte("mailto:")) { - return link[7:] - } else { - return link - } -} - -// autolinkType specifies a kind of autolink that gets detected. -type autolinkType int - -// These are the possible flag values for the autolink renderer. 
-const ( - notAutolink autolinkType = iota - normalAutolink - emailAutolink -) - -// '<' when tags or autolinks are allowed -func leftAngle(p *Parser, data []byte, offset int) (int, ast.Node) { - data = data[offset:] - - if p.extensions&Mmark != 0 { - id, consumed := IsCallout(data) - if consumed > 0 { - node := &ast.Callout{} - node.ID = id - return consumed, node - } - } - - altype, end := tagLength(data) - if size := p.inlineHTMLComment(data); size > 0 { - end = size - } - if end <= 2 { - return end, nil - } - if altype == notAutolink { - htmlTag := &ast.HTMLSpan{} - htmlTag.Literal = data[:end] - return end, htmlTag - } - - var uLink bytes.Buffer - unescapeText(&uLink, data[1:end+1-2]) - if uLink.Len() <= 0 { - return end, nil - } - link := uLink.Bytes() - node := &ast.Link{ - Destination: link, - } - if altype == emailAutolink { - node.Destination = append([]byte("mailto:"), link...) - } - ast.AppendChild(node, newTextNode(stripMailto(link))) - return end, node -} - -// '\\' backslash escape -var escapeChars = []byte("\\`*_{}[]()#+-.!:|&<>~^$") - -func escape(p *Parser, data []byte, offset int) (int, ast.Node) { - data = data[offset:] - - if len(data) <= 1 { - return 2, nil - } - - if p.extensions&NonBlockingSpace != 0 && data[1] == ' ' { - return 2, &ast.NonBlockingSpace{} - } - - if p.extensions&BackslashLineBreak != 0 && data[1] == '\n' { - return 2, &ast.Hardbreak{} - } - - if bytes.IndexByte(escapeChars, data[1]) < 0 { - return 0, nil - } - - return 2, newTextNode(data[1:2]) -} - -func unescapeText(ob *bytes.Buffer, src []byte) { - i := 0 - for i < len(src) { - org := i - for i < len(src) && src[i] != '\\' { - i++ - } - - if i > org { - ob.Write(src[org:i]) - } - - if i+1 >= len(src) { - break - } - - ob.WriteByte(src[i+1]) - i += 2 - } -} - -// '&' escaped when it doesn't belong to an entity -// valid entities are assumed to be anything matching &#?[A-Za-z0-9]+; -func entity(p *Parser, data []byte, offset int) (int, ast.Node) { - data = data[offset:] - - end := skipCharN(data, 1, '#', 1) - end = skipAlnum(data, end) - - if end < len(data) && data[end] == ';' { - end++ // real entity - } else { - return 0, nil // lone '&' - } - - ent := data[:end] - // undo & escaping or it will be converted to &amp; by another - // escaper in the renderer - if bytes.Equal(ent, []byte("&")) { - return end, newTextNode([]byte{'&'}) - } - if len(ent) < 4 { - return end, newTextNode(ent) - } - - // if ent consists solely out of numbers (hex or decimal) convert that unicode codepoint to actual rune - codepoint := uint64(0) - var err error - if ent[2] == 'x' || ent[2] == 'X' { // hexadecimal - codepoint, err = strconv.ParseUint(string(ent[3:len(ent)-1]), 16, 64) - } else { - codepoint, err = strconv.ParseUint(string(ent[2:len(ent)-1]), 10, 64) - } - if err == nil { // only if conversion was valid return here. - return end, newTextNode([]byte(string(rune(codepoint)))) - } - - return end, newTextNode(ent) -} - -func linkEndsWithEntity(data []byte, linkEnd int) bool { - entityRanges := htmlEntityRe.FindAllIndex(data[:linkEnd], -1) - return entityRanges != nil && entityRanges[len(entityRanges)-1][1] == linkEnd -} - -// hasPrefixCaseInsensitive is a custom implementation of -// -// strings.HasPrefix(strings.ToLower(s), prefix) -// -// we rolled our own because ToLower pulls in a huge machinery of lowercasing -// anything from Unicode and that's very slow. Since this func will only be -// used on ASCII protocol prefixes, we can take shortcuts. 
-func hasPrefixCaseInsensitive(s, prefix []byte) bool { - if len(s) < len(prefix) { - return false - } - delta := byte('a' - 'A') - for i, b := range prefix { - if b != s[i] && b != s[i]+delta { - return false - } - } - return true -} - -var protocolPrefixes = [][]byte{ - []byte("http://"), - []byte("https://"), - []byte("ftp://"), - []byte("file://"), - []byte("mailto:"), -} - -const shortestPrefix = 6 // len("ftp://"), the shortest of the above - -func maybeAutoLink(p *Parser, data []byte, offset int) (int, ast.Node) { - // quick check to rule out most false hits - if p.insideLink || len(data) < offset+shortestPrefix { - return 0, nil - } - for _, prefix := range protocolPrefixes { - endOfHead := offset + 8 // 8 is the len() of the longest prefix - if endOfHead > len(data) { - endOfHead = len(data) - } - if hasPrefixCaseInsensitive(data[offset:endOfHead], prefix) { - return autoLink(p, data, offset) - } - } - return 0, nil -} - -func autoLink(p *Parser, data []byte, offset int) (int, ast.Node) { - // Now a more expensive check to see if we're not inside an anchor element - anchorStart := offset - offsetFromAnchor := 0 - for anchorStart > 0 && data[anchorStart] != '<' { - anchorStart-- - offsetFromAnchor++ - } - - anchorStr := anchorRe.Find(data[anchorStart:]) - if anchorStr != nil { - anchorClose := &ast.HTMLSpan{} - anchorClose.Literal = anchorStr[offsetFromAnchor:] - return len(anchorStr) - offsetFromAnchor, anchorClose - } - - // scan backward for a word boundary - rewind := 0 - for offset-rewind > 0 && rewind <= 7 && IsLetter(data[offset-rewind-1]) { - rewind++ - } - if rewind > 6 { // longest supported protocol is "mailto" which has 6 letters - return 0, nil - } - - origData := data - data = data[offset-rewind:] - - isSafeURL := p.IsSafeURLOverride - if isSafeURL == nil { - isSafeURL = IsSafeURL - } - if !isSafeURL(data) { - return 0, nil - } - - linkEnd := 0 - for linkEnd < len(data) && !isEndOfLink(data[linkEnd]) { - linkEnd++ - } - - // Skip punctuation at the end of the link - if (data[linkEnd-1] == '.' || data[linkEnd-1] == ',') && data[linkEnd-2] != '\\' { - linkEnd-- - } - - // But don't skip semicolon if it's a part of escaped entity: - if data[linkEnd-1] == ';' && data[linkEnd-2] != '\\' && !linkEndsWithEntity(data, linkEnd) { - linkEnd-- - } - - // See if the link finishes with a punctuation sign that can be closed. - var copen byte - switch data[linkEnd-1] { - case '"': - copen = '"' - case '\'': - copen = '\'' - case ')': - copen = '(' - case ']': - copen = '[' - case '}': - copen = '{' - default: - copen = 0 - } - - if copen != 0 { - bufEnd := offset - rewind + linkEnd - 2 - - openDelim := 1 - - /* Try to close the final punctuation sign in this same line; - * if we managed to close it outside of the URL, that means that it's - * not part of the URL. If it closes inside the URL, that means it - * is part of the URL. 
- * - * Examples: - * - * foo http://www.pokemon.com/Pikachu_(Electric) bar - * => http://www.pokemon.com/Pikachu_(Electric) - * - * foo (http://www.pokemon.com/Pikachu_(Electric)) bar - * => http://www.pokemon.com/Pikachu_(Electric) - * - * foo http://www.pokemon.com/Pikachu_(Electric)) bar - * => http://www.pokemon.com/Pikachu_(Electric)) - * - * (foo http://www.pokemon.com/Pikachu_(Electric)) bar - * => foo http://www.pokemon.com/Pikachu_(Electric) - */ - - for bufEnd >= 0 && origData[bufEnd] != '\n' && openDelim != 0 { - if origData[bufEnd] == data[linkEnd-1] { - openDelim++ - } - - if origData[bufEnd] == copen { - openDelim-- - } - - bufEnd-- - } - - if openDelim == 0 { - linkEnd-- - } - } - - var uLink bytes.Buffer - unescapeText(&uLink, data[:linkEnd]) - - if uLink.Len() > 0 { - node := &ast.Link{ - Destination: uLink.Bytes(), - } - ast.AppendChild(node, newTextNode(uLink.Bytes())) - return linkEnd, node - } - - return linkEnd, nil -} - -func isEndOfLink(char byte) bool { - return IsSpace(char) || char == '<' -} - -// return the length of the given tag, or 0 is it's not valid -func tagLength(data []byte) (autolink autolinkType, end int) { - var i, j int - - // a valid tag can't be shorter than 3 chars - if len(data) < 3 { - return notAutolink, 0 - } - - // begins with a '<' optionally followed by '/', followed by letter or number - if data[0] != '<' { - return notAutolink, 0 - } - if data[1] == '/' { - i = 2 - } else { - i = 1 - } - - if !IsAlnum(data[i]) { - return notAutolink, 0 - } - - // scheme test - autolink = notAutolink - - // try to find the beginning of an URI - for i < len(data) && (IsAlnum(data[i]) || data[i] == '.' || data[i] == '+' || data[i] == '-') { - i++ - } - - if i > 1 && i < len(data) && data[i] == '@' { - if j = isMailtoAutoLink(data[i:]); j != 0 { - return emailAutolink, i + j - } - } - - if i > 2 && i < len(data) && data[i] == ':' { - autolink = normalAutolink - i++ - } - - // complete autolink test: no whitespace or ' or " - switch { - case i >= len(data): - autolink = notAutolink - case autolink != notAutolink: - j = i - - for i < len(data) { - if data[i] == '\\' { - i += 2 - } else if data[i] == '>' || data[i] == '\'' || data[i] == '"' || IsSpace(data[i]) { - break - } else { - i++ - } - - } - - if i >= len(data) { - return autolink, 0 - } - if i > j && data[i] == '>' { - return autolink, i + 1 - } - - // one of the forbidden chars has been found - autolink = notAutolink - } - i += bytes.IndexByte(data[i:], '>') - if i < 0 { - return autolink, 0 - } - return autolink, i + 1 -} - -// look for the address part of a mail autolink and '>' -// this is less strict than the original markdown e-mail address matching -func isMailtoAutoLink(data []byte) int { - nb := 0 - - // address is assumed to be: [-@._a-zA-Z0-9]+ with exactly one '@' - for i, c := range data { - if IsAlnum(c) { - continue - } - - switch c { - case '@': - nb++ - - case '-', '.', '_': - // no-op but not defult - - case '>': - if nb == 1 { - return i + 1 - } - return 0 - default: - return 0 - } - } - - return 0 -} - -// look for the next emph char, skipping other constructs -func helperFindEmphChar(data []byte, c byte) int { - i := 0 - - for i < len(data) { - for i < len(data) && data[i] != c && data[i] != '`' && data[i] != '[' { - i++ - } - if i >= len(data) { - return 0 - } - // do not count escaped chars - if i != 0 && data[i-1] == '\\' { - i++ - continue - } - if data[i] == c { - return i - } - - if data[i] == '`' { - // skip a code span - tmpI := 0 - i++ - for i < len(data) && data[i] != '`' 
{ - if tmpI == 0 && data[i] == c { - tmpI = i - } - i++ - } - if i >= len(data) { - return tmpI - } - i++ - } else if data[i] == '[' { - // skip a link - tmpI := 0 - i++ - for i < len(data) && data[i] != ']' { - if tmpI == 0 && data[i] == c { - tmpI = i - } - i++ - } - i++ - for i < len(data) && (data[i] == ' ' || data[i] == '\n') { - i++ - } - if i >= len(data) { - return tmpI - } - if data[i] != '[' && data[i] != '(' { // not a link - if tmpI > 0 { - return tmpI - } - continue - } - cc := data[i] - i++ - for i < len(data) && data[i] != cc { - if tmpI == 0 && data[i] == c { - return i - } - i++ - } - if i >= len(data) { - return tmpI - } - i++ - } - } - return 0 -} - -func helperEmphasis(p *Parser, data []byte, c byte) (int, ast.Node) { - i := 0 - - // skip one symbol if coming from emph3 - if len(data) > 1 && data[0] == c && data[1] == c { - i = 1 - } - - for i < len(data) { - length := helperFindEmphChar(data[i:], c) - if length == 0 { - return 0, nil - } - i += length - if i >= len(data) { - return 0, nil - } - - if i+1 < len(data) && data[i+1] == c { - i++ - continue - } - - if data[i] == c && !IsSpace(data[i-1]) { - - if p.extensions&NoIntraEmphasis != 0 { - if !(i+1 == len(data) || IsSpace(data[i+1]) || IsPunctuation(data[i+1])) { - continue - } - } - - emph := &ast.Emph{} - p.Inline(emph, data[:i]) - return i + 1, emph - } - } - - return 0, nil -} - -func helperDoubleEmphasis(p *Parser, data []byte, c byte) (int, ast.Node) { - i := 0 - - for i < len(data) { - length := helperFindEmphChar(data[i:], c) - if length == 0 { - return 0, nil - } - i += length - - if i+1 < len(data) && data[i] == c && data[i+1] == c && i > 0 && !IsSpace(data[i-1]) { - var node ast.Node = &ast.Strong{} - if c == '~' { - node = &ast.Del{} - } - p.Inline(node, data[:i]) - return i + 2, node - } - i++ - } - return 0, nil -} - -func helperTripleEmphasis(p *Parser, data []byte, offset int, c byte) (int, ast.Node) { - i := 0 - origData := data - data = data[offset:] - - for i < len(data) { - length := helperFindEmphChar(data[i:], c) - if length == 0 { - return 0, nil - } - i += length - - // skip whitespace preceded symbols - if data[i] != c || IsSpace(data[i-1]) { - continue - } - - switch { - case i+2 < len(data) && data[i+1] == c && data[i+2] == c: - // triple symbol found - strong := &ast.Strong{} - em := &ast.Emph{} - ast.AppendChild(strong, em) - p.Inline(em, data[:i]) - return i + 3, strong - case i+1 < len(data) && data[i+1] == c: - // double symbol found, hand over to emph1 - length, node := helperEmphasis(p, origData[offset-2:], c) - if length == 0 { - return 0, nil - } - return length - 2, node - default: - // single symbol found, hand over to emph2 - length, node := helperDoubleEmphasis(p, origData[offset-1:], c) - if length == 0 { - return 0, nil - } - return length - 1, node - } - } - return 0, nil -} - -// math handle inline math wrapped with '$' -func math(p *Parser, data []byte, offset int) (int, ast.Node) { - data = data[offset:] - - // too short, or block math - if len(data) <= 2 || data[1] == '$' { - return 0, nil - } - - // find next '$' - var end int - for end = 1; end < len(data) && data[end] != '$'; end++ { - } - - // $ not match - if end == len(data) { - return 0, nil - } - - // create inline math node - math := &ast.Math{} - math.Literal = data[1:end] - return end + 1, math -} - -func newTextNode(d []byte) *ast.Text { - return &ast.Text{Leaf: ast.Leaf{Literal: d}} -} - -func normalizeURI(s []byte) []byte { - return s // TODO: implement -} diff --git 
a/vendor/github.com/gomarkdown/markdown/parser/matter.go b/vendor/github.com/gomarkdown/markdown/parser/matter.go deleted file mode 100644 index df2842375b..0000000000 --- a/vendor/github.com/gomarkdown/markdown/parser/matter.go +++ /dev/null @@ -1,36 +0,0 @@ -package parser - -import ( - "bytes" - - "github.com/gomarkdown/markdown/ast" -) - -func (p *Parser) documentMatter(data []byte) int { - if data[0] != '{' { - return 0 - } - - consumed := 0 - matter := ast.DocumentMatterNone - if bytes.HasPrefix(data, []byte("{frontmatter}")) { - consumed = len("{frontmatter}") - matter = ast.DocumentMatterFront - } - if bytes.HasPrefix(data, []byte("{mainmatter}")) { - consumed = len("{mainmatter}") - matter = ast.DocumentMatterMain - } - if bytes.HasPrefix(data, []byte("{backmatter}")) { - consumed = len("{backmatter}") - matter = ast.DocumentMatterBack - } - if consumed == 0 { - return 0 - } - node := &ast.DocumentMatter{Matter: matter} - p.AddBlock(node) - p.Finalize(node) - - return consumed -} diff --git a/vendor/github.com/gomarkdown/markdown/parser/options.go b/vendor/github.com/gomarkdown/markdown/parser/options.go deleted file mode 100644 index d3d0c0887c..0000000000 --- a/vendor/github.com/gomarkdown/markdown/parser/options.go +++ /dev/null @@ -1,32 +0,0 @@ -package parser - -import ( - "github.com/gomarkdown/markdown/ast" -) - -// Flags control optional behavior of parser. -type Flags int - -// Options is a collection of supplementary parameters tweaking the behavior of various parts of the parser. -type Options struct { - ParserHook BlockFunc - ReadIncludeFn ReadIncludeFunc - - Flags Flags // Flags allow customizing parser's behavior -} - -// Parser renderer configuration options. -const ( - FlagsNone Flags = 0 - SkipFootnoteList Flags = 1 << iota // Skip adding the footnote list (regardless if they are parsed) -) - -// BlockFunc allows to registration of a parser function. If successful it -// returns an ast.Node, a buffer that should be parsed as a block and the the number of bytes consumed. -type BlockFunc func(data []byte) (ast.Node, []byte, int) - -// ReadIncludeFunc should read the file under path and returns the read bytes, -// from will be set to the name of the current file being parsed. Initially -// this will be empty. address is the optional address specifier of which lines -// of the file to return. If this function is not set no data will be read. -type ReadIncludeFunc func(from, path string, address []byte) []byte diff --git a/vendor/github.com/gomarkdown/markdown/parser/parser.go b/vendor/github.com/gomarkdown/markdown/parser/parser.go deleted file mode 100644 index f0a0d9cd99..0000000000 --- a/vendor/github.com/gomarkdown/markdown/parser/parser.go +++ /dev/null @@ -1,933 +0,0 @@ -/* -Package parser implements parser for markdown text that generates AST (abstract syntax tree). -*/ -package parser - -import ( - "bytes" - "fmt" - "strconv" - "strings" - - "github.com/gomarkdown/markdown/ast" -) - -// Extensions is a bitmask of enabled parser extensions. -type Extensions int - -// Bit flags representing markdown parsing extensions. -// Use | (or) to specify multiple extensions. 
-const ( - NoExtensions Extensions = 0 - NoIntraEmphasis Extensions = 1 << iota // Ignore emphasis markers inside words - Tables // Parse tables - FencedCode // Parse fenced code blocks - Autolink // Detect embedded URLs that are not explicitly marked - Strikethrough // Strikethrough text using ~~test~~ - LaxHTMLBlocks // Loosen up HTML block parsing rules - SpaceHeadings // Be strict about prefix heading rules - HardLineBreak // Translate newlines into line breaks - NonBlockingSpace // Translate backspace spaces into line non-blocking spaces - TabSizeEight // Expand tabs to eight spaces instead of four - Footnotes // Pandoc-style footnotes - NoEmptyLineBeforeBlock // No need to insert an empty line to start a (code, quote, ordered list, unordered list) block - HeadingIDs // specify heading IDs with {#id} - Titleblock // Titleblock ala pandoc - AutoHeadingIDs // Create the heading ID from the text - BackslashLineBreak // Translate trailing backslashes into line breaks - DefinitionLists // Parse definition lists - MathJax // Parse MathJax - OrderedListStart // Keep track of the first number used when starting an ordered list. - Attributes // Block Attributes - SuperSubscript // Super- and subscript support: 2^10^, H~2~O. - EmptyLinesBreakList // 2 empty lines break out of list - Includes // Support including other files. - Mmark // Support Mmark syntax, see https://mmark.miek.nl/post/syntax/ - - CommonExtensions Extensions = NoIntraEmphasis | Tables | FencedCode | - Autolink | Strikethrough | SpaceHeadings | HeadingIDs | - BackslashLineBreak | DefinitionLists | MathJax -) - -// The size of a tab stop. -const ( - tabSizeDefault = 4 - tabSizeDouble = 8 -) - -// for each character that triggers a response when parsing inline data. -type inlineParser func(p *Parser, data []byte, offset int) (int, ast.Node) - -// ReferenceOverrideFunc is expected to be called with a reference string and -// return either a valid Reference type that the reference string maps to or -// nil. If overridden is false, the default reference logic will be executed. -// See the documentation in Options for more details on use-case. -type ReferenceOverrideFunc func(reference string) (ref *Reference, overridden bool) - -// Parser is a type that holds extensions and the runtime state used by -// Parse, and the renderer. You can not use it directly, construct it with New. -type Parser struct { - - // ReferenceOverride is an optional function callback that is called every - // time a reference is resolved. It can be set before starting parsing. - // - // In Markdown, the link reference syntax can be made to resolve a link to - // a reference instead of an inline URL, in one of the following ways: - // - // * [link text][refid] - // * [refid][] - // - // Usually, the refid is defined at the bottom of the Markdown document. If - // this override function is provided, the refid is passed to the override - // function first, before consulting the defined refids at the bottom. If - // the override function indicates an override did not occur, the refids at - // the bottom will be used to fill in the link details. - ReferenceOverride ReferenceOverrideFunc - - // IsSafeURLOverride allows overriding the default URL matcher. URL is - // safe if the overriding function returns true. Can be used to extend - // the default list of safe URLs. 
- IsSafeURLOverride func(url []byte) bool - - Opts Options - - // after parsing, this is AST root of parsed markdown text - Doc ast.Node - - extensions Extensions - - refs map[string]*reference - refsRecord map[string]struct{} - inlineCallback [256]inlineParser - nesting int - maxNesting int - insideLink bool - indexCnt int // incremented after every index - - // Footnotes need to be ordered as well as available to quickly check for - // presence. If a ref is also a footnote, it's stored both in refs and here - // in notes. Slice is nil if footnotes not enabled. - notes []*reference - - tip ast.Node // = doc - oldTip ast.Node - lastMatchedContainer ast.Node // = doc - allClosed bool - - // Attributes are attached to block level elements. - attr *ast.Attribute - - includeStack *incStack - - // collect headings where we auto-generated id so that we can - // ensure they are unique at the end - allHeadingsWithAutoID []*ast.Heading -} - -// New creates a markdown parser with CommonExtensions. -// -// You can then call `doc := p.Parse(markdown)` to parse markdown document -// and `markdown.Render(doc, renderer)` to convert it to another format with -// a renderer. -func New() *Parser { - return NewWithExtensions(CommonExtensions) -} - -// NewWithExtensions creates a markdown parser with given extensions. -func NewWithExtensions(extension Extensions) *Parser { - p := Parser{ - refs: make(map[string]*reference), - refsRecord: make(map[string]struct{}), - maxNesting: 16, - insideLink: false, - Doc: &ast.Document{}, - extensions: extension, - allClosed: true, - includeStack: newIncStack(), - } - p.tip = p.Doc - p.oldTip = p.Doc - p.lastMatchedContainer = p.Doc - - p.inlineCallback[' '] = maybeLineBreak - p.inlineCallback['*'] = emphasis - p.inlineCallback['_'] = emphasis - if p.extensions&Strikethrough != 0 { - p.inlineCallback['~'] = emphasis - } - p.inlineCallback['`'] = codeSpan - p.inlineCallback['\n'] = lineBreak - p.inlineCallback['['] = link - p.inlineCallback['<'] = leftAngle - p.inlineCallback['\\'] = escape - p.inlineCallback['&'] = entity - p.inlineCallback['!'] = maybeImage - if p.extensions&Mmark != 0 { - p.inlineCallback['('] = maybeShortRefOrIndex - } - p.inlineCallback['^'] = maybeInlineFootnoteOrSuper - if p.extensions&Autolink != 0 { - p.inlineCallback['h'] = maybeAutoLink - p.inlineCallback['m'] = maybeAutoLink - p.inlineCallback['f'] = maybeAutoLink - p.inlineCallback['H'] = maybeAutoLink - p.inlineCallback['M'] = maybeAutoLink - p.inlineCallback['F'] = maybeAutoLink - } - if p.extensions&MathJax != 0 { - p.inlineCallback['$'] = math - } - - return &p -} - -func (p *Parser) RegisterInline(n byte, fn inlineParser) inlineParser { - prev := p.inlineCallback[n] - p.inlineCallback[n] = fn - return prev -} - -func (p *Parser) getRef(refid string) (ref *reference, found bool) { - if p.ReferenceOverride != nil { - r, overridden := p.ReferenceOverride(refid) - if overridden { - if r == nil { - return nil, false - } - return &reference{ - link: []byte(r.Link), - title: []byte(r.Title), - noteID: 0, - hasBlock: false, - text: []byte(r.Text)}, true - } - } - // refs are case insensitive - ref, found = p.refs[strings.ToLower(refid)] - return ref, found -} - -func (p *Parser) isFootnote(ref *reference) bool { - _, ok := p.refsRecord[string(ref.link)] - return ok -} - -func (p *Parser) Finalize(block ast.Node) { - p.tip = block.GetParent() -} - -func (p *Parser) addChild(node ast.Node) ast.Node { - for !canNodeContain(p.tip, node) { - p.Finalize(p.tip) - } - ast.AppendChild(p.tip, node) - p.tip 
= node - return node -} - -func canNodeContain(n ast.Node, v ast.Node) bool { - switch n.(type) { - case *ast.List: - return isListItem(v) - case *ast.Document, *ast.BlockQuote, *ast.Aside, *ast.ListItem, *ast.CaptionFigure: - return !isListItem(v) - case *ast.Table: - switch v.(type) { - case *ast.TableHeader, *ast.TableBody, *ast.TableFooter: - return true - default: - return false - } - case *ast.TableHeader, *ast.TableBody, *ast.TableFooter: - _, ok := v.(*ast.TableRow) - return ok - case *ast.TableRow: - _, ok := v.(*ast.TableCell) - return ok - } - // for nodes implemented outside of ast package, allow them - // to implement this logic via CanContain interface - if o, ok := n.(ast.CanContain); ok { - return o.CanContain(v) - } - // for container nodes outside of ast package default to true - // because false is a bad default - typ := fmt.Sprintf("%T", n) - customNode := !strings.HasPrefix(typ, "*ast.") - if customNode { - return n.AsLeaf() == nil - } - return false -} - -func (p *Parser) closeUnmatchedBlocks() { - if p.allClosed { - return - } - for p.oldTip != p.lastMatchedContainer { - parent := p.oldTip.GetParent() - p.Finalize(p.oldTip) - p.oldTip = parent - } - p.allClosed = true -} - -// Reference represents the details of a link. -// See the documentation in Options for more details on use-case. -type Reference struct { - // Link is usually the URL the reference points to. - Link string - // Title is the alternate text describing the link in more detail. - Title string - // Text is the optional text to override the ref with if the syntax used was - // [refid][] - Text string -} - -// Parse generates AST (abstract syntax tree) representing markdown document. -// -// The result is a root of the tree whose underlying type is *ast.Document -// -// You can then convert AST to html using html.Renderer, to some other format -// using a custom renderer or transform the tree. -func (p *Parser) Parse(input []byte) ast.Node { - // the code only works with Unix CR newlines so to make life easy for - // callers normalize newlines - input = NormalizeNewlines(input) - - p.Block(input) - // Walk the tree and finish up some of unfinished blocks - for p.tip != nil { - p.Finalize(p.tip) - } - // Walk the tree again and process inline markdown in each block - ast.WalkFunc(p.Doc, func(node ast.Node, entering bool) ast.WalkStatus { - switch node.(type) { - case *ast.Paragraph, *ast.Heading, *ast.TableCell: - p.Inline(node, node.AsContainer().Content) - node.AsContainer().Content = nil - } - return ast.GoToNext - }) - - if p.Opts.Flags&SkipFootnoteList == 0 { - p.parseRefsToAST() - } - - // ensure HeadingIDs generated with AutoHeadingIDs are unique - // this is delayed here (as opposed to done when we create the id) - // so that we can preserve more original ids when there are conflicts - taken := map[string]bool{} - for _, h := range p.allHeadingsWithAutoID { - id := h.HeadingID - if id == "" { - continue - } - n := 0 - for taken[id] { - n++ - id = h.HeadingID + "-" + strconv.Itoa(n) - } - h.HeadingID = id - taken[id] = true - } - - return p.Doc -} - -func (p *Parser) parseRefsToAST() { - if p.extensions&Footnotes == 0 || len(p.notes) == 0 { - return - } - p.tip = p.Doc - list := &ast.List{ - IsFootnotesList: true, - ListFlags: ast.ListTypeOrdered, - } - p.AddBlock(&ast.Footnotes{}) - block := p.AddBlock(list) - flags := ast.ListItemBeginningOfList - // Note: this loop is intentionally explicit, not range-form. 
This is - // because the body of the loop will append nested footnotes to p.notes and - // we need to process those late additions. Range form would only walk over - // the fixed initial set. - for i := 0; i < len(p.notes); i++ { - ref := p.notes[i] - p.addChild(ref.footnote) - block := ref.footnote - listItem := block.(*ast.ListItem) - listItem.ListFlags = flags | ast.ListTypeOrdered - listItem.RefLink = ref.link - if ref.hasBlock { - flags |= ast.ListItemContainsBlock - p.Block(ref.title) - } else { - p.Inline(block, ref.title) - } - flags &^= ast.ListItemBeginningOfList | ast.ListItemContainsBlock - } - above := list.Parent - finalizeList(list) - p.tip = above - - ast.WalkFunc(block, func(node ast.Node, entering bool) ast.WalkStatus { - switch node.(type) { - case *ast.Paragraph, *ast.Heading: - p.Inline(node, node.AsContainer().Content) - node.AsContainer().Content = nil - } - return ast.GoToNext - }) -} - -// -// Link references -// -// This section implements support for references that (usually) appear -// as footnotes in a document, and can be referenced anywhere in the document. -// The basic format is: -// -// [1]: http://www.google.com/ "Google" -// [2]: http://www.github.com/ "Github" -// -// Anywhere in the document, the reference can be linked by referring to its -// label, i.e., 1 and 2 in this example, as in: -// -// This library is hosted on [Github][2], a git hosting site. -// -// Actual footnotes as specified in Pandoc and supported by some other Markdown -// libraries such as php-markdown are also taken care of. They look like this: -// -// This sentence needs a bit of further explanation.[^note] -// -// [^note]: This is the explanation. -// -// Footnotes should be placed at the end of the document in an ordered list. -// Inline footnotes such as: -// -// Inline footnotes^[Not supported.] also exist. -// -// are not yet supported. - -// reference holds all information necessary for a reference-style links or -// footnotes. -// -// Consider this markdown with reference-style links: -// -// [link][ref] -// -// [ref]: /url/ "tooltip title" -// -// It will be ultimately converted to this HTML: -// -//
-//	<p><a href="/url/" title="tooltip title">link</a></p>
-//
-// And a reference structure will be populated as follows:
-//
-//	p.refs["ref"] = &reference{
-//		link:  "/url/",
-//		title: "tooltip title",
-//	}
-//
-// Alternatively, reference can contain information about a footnote. Consider
-// this markdown:
-//
-//	Text needing a footnote.[^a]
-//
-//	[^a]: This is the note
-//
-// A reference structure will be populated as follows:
-//
-//	p.refs["a"] = &reference{
-//		link:   "a",
-//		title:  "This is the note",
-//		noteID: <some positive int>,
-//	}
-//
-// TODO: As you can see, it begs for splitting into two dedicated structures
-// for refs and for footnotes.
-type reference struct {
-	link     []byte
-	title    []byte
-	noteID   int // 0 if not a footnote ref
-	hasBlock bool
-	footnote ast.Node // a link to the Item node within a list of footnotes
-
-	text []byte // only gets populated by refOverride feature with Reference.Text
-}
-
-func (r *reference) String() string {
-	return fmt.Sprintf("{link: %q, title: %q, text: %q, noteID: %d, hasBlock: %v}",
-		r.link, r.title, r.text, r.noteID, r.hasBlock)
-}
-
-// Check whether or not data starts with a reference link.
-// If so, it is parsed and stored in the list of references
-// (in the render struct).
-// Returns the number of bytes to skip to move past it,
-// or zero if the first line is not a reference.
-func isReference(p *Parser, data []byte, tabSize int) int {
-	// up to 3 optional leading spaces
-	if len(data) < 4 {
-		return 0
-	}
-	i := 0
-	for i < 3 && data[i] == ' ' {
-		i++
-	}
-
-	noteID := 0
-
-	// id part: anything but a newline between brackets
-	if data[i] != '[' {
-		return 0
-	}
-	i++
-	if p.extensions&Footnotes != 0 {
-		if i < len(data) && data[i] == '^' {
-			// we can set it to anything here because the proper noteIds will
-			// be assigned later during the second pass. It just has to be != 0
-			noteID = 1
-			i++
-		}
-	}
-	idOffset := i
-	for i < len(data) && data[i] != '\n' && data[i] != '\r' && data[i] != ']' {
-		i++
-	}
-	if i >= len(data) || data[i] != ']' {
-		return 0
-	}
-	idEnd := i
-	// footnotes can have empty ID, like this: [^], but a reference can not be
-	// empty like this: []. Break early if it's not a footnote and there's no ID
-	if noteID == 0 && idOffset == idEnd {
-		return 0
-	}
-	// spacer: colon (space | tab)* newline?
(space | tab)* - i++ - if i >= len(data) || data[i] != ':' { - return 0 - } - i++ - for i < len(data) && (data[i] == ' ' || data[i] == '\t') { - i++ - } - if i < len(data) && (data[i] == '\n' || data[i] == '\r') { - i++ - if i < len(data) && data[i] == '\n' && data[i-1] == '\r' { - i++ - } - } - for i < len(data) && (data[i] == ' ' || data[i] == '\t') { - i++ - } - if i >= len(data) { - return 0 - } - - var ( - linkOffset, linkEnd int - titleOffset, titleEnd int - lineEnd int - raw []byte - hasBlock bool - ) - - if p.extensions&Footnotes != 0 && noteID != 0 { - linkOffset, linkEnd, raw, hasBlock = scanFootnote(p, data, i, tabSize) - lineEnd = linkEnd - } else { - linkOffset, linkEnd, titleOffset, titleEnd, lineEnd = scanLinkRef(p, data, i) - } - if lineEnd == 0 { - return 0 - } - - // a valid ref has been found - - ref := &reference{ - noteID: noteID, - hasBlock: hasBlock, - } - - if noteID > 0 { - // reusing the link field for the id since footnotes don't have links - ref.link = data[idOffset:idEnd] - // if footnote, it's not really a title, it's the contained text - ref.title = raw - } else { - ref.link = data[linkOffset:linkEnd] - ref.title = data[titleOffset:titleEnd] - } - - // id matches are case-insensitive - id := string(bytes.ToLower(data[idOffset:idEnd])) - - p.refs[id] = ref - - return lineEnd -} - -func scanLinkRef(p *Parser, data []byte, i int) (linkOffset, linkEnd, titleOffset, titleEnd, lineEnd int) { - // link: whitespace-free sequence, optionally between angle brackets - if data[i] == '<' { - i++ - } - linkOffset = i - for i < len(data) && data[i] != ' ' && data[i] != '\t' && data[i] != '\n' && data[i] != '\r' { - i++ - } - linkEnd = i - if linkEnd < len(data) && data[linkOffset] == '<' && data[linkEnd-1] == '>' { - linkOffset++ - linkEnd-- - } - - // optional spacer: (space | tab)* (newline | '\'' | '"' | '(' ) - for i < len(data) && (data[i] == ' ' || data[i] == '\t') { - i++ - } - if i < len(data) && data[i] != '\n' && data[i] != '\r' && data[i] != '\'' && data[i] != '"' && data[i] != '(' { - return - } - - // compute end-of-line - if i >= len(data) || data[i] == '\r' || data[i] == '\n' { - lineEnd = i - } - if i+1 < len(data) && data[i] == '\r' && data[i+1] == '\n' { - lineEnd++ - } - - // optional (space|tab)* spacer after a newline - if lineEnd > 0 { - i = lineEnd + 1 - for i < len(data) && (data[i] == ' ' || data[i] == '\t') { - i++ - } - } - - // optional title: any non-newline sequence enclosed in '"() alone on its line - if i+1 < len(data) && (data[i] == '\'' || data[i] == '"' || data[i] == '(') { - i++ - titleOffset = i - - // look for EOL - for i < len(data) && data[i] != '\n' && data[i] != '\r' { - i++ - } - if i+1 < len(data) && data[i] == '\n' && data[i+1] == '\r' { - titleEnd = i + 1 - } else { - titleEnd = i - } - - // step back - i-- - for i > titleOffset && (data[i] == ' ' || data[i] == '\t') { - i-- - } - if i > titleOffset && (data[i] == '\'' || data[i] == '"' || data[i] == ')') { - lineEnd = titleEnd - titleEnd = i - } - } - - return -} - -// The first bit of this logic is the same as Parser.listItem, but the rest -// is much simpler. This function simply finds the entire block and shifts it -// over by one tab if it is indeed a block (just returns the line if it's not). -// blockEnd is the end of the section in the input buffer, and contents is the -// extracted text that was shifted over one tab. It will need to be rendered at -// the end of the document. 
-func scanFootnote(p *Parser, data []byte, i, indentSize int) (blockStart, blockEnd int, contents []byte, hasBlock bool) { - if i == 0 || len(data) == 0 { - return - } - - // skip leading whitespace on first line - for i < len(data) && data[i] == ' ' { - i++ - } - - blockStart = i - - // find the end of the line - blockEnd = i - for i < len(data) && data[i-1] != '\n' { - i++ - } - - // get working buffer - var raw bytes.Buffer - - // put the first line into the working buffer - raw.Write(data[blockEnd:i]) - blockEnd = i - - // process the following lines - containsBlankLine := false - -gatherLines: - for blockEnd < len(data) { - i++ - - // find the end of this line - for i < len(data) && data[i-1] != '\n' { - i++ - } - - // if it is an empty line, guess that it is part of this item - // and move on to the next line - if IsEmpty(data[blockEnd:i]) > 0 { - containsBlankLine = true - blockEnd = i - continue - } - - n := 0 - if n = isIndented(data[blockEnd:i], indentSize); n == 0 { - // this is the end of the block. - // we don't want to include this last line in the index. - break gatherLines - } - - // if there were blank lines before this one, insert a new one now - if containsBlankLine { - raw.WriteByte('\n') - containsBlankLine = false - } - - // get rid of that first tab, write to buffer - raw.Write(data[blockEnd+n : i]) - hasBlock = true - - blockEnd = i - } - - if data[blockEnd-1] != '\n' { - raw.WriteByte('\n') - } - - contents = raw.Bytes() - - return -} - -// IsPunctuation returns true if c is a punctuation symbol. -func IsPunctuation(c byte) bool { - for _, r := range []byte("!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~") { - if c == r { - return true - } - } - return false -} - -// IsSpace returns true if c is a white-space charactr -func IsSpace(c byte) bool { - return c == ' ' || c == '\t' || c == '\n' || c == '\r' || c == '\f' || c == '\v' -} - -// IsLetter returns true if c is ascii letter -func IsLetter(c byte) bool { - return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') -} - -// IsAlnum returns true if c is a digit or letter -// TODO: check when this is looking for ASCII alnum and when it should use unicode -func IsAlnum(c byte) bool { - return (c >= '0' && c <= '9') || IsLetter(c) -} - -var URIs = [][]byte{ - []byte("http://"), - []byte("https://"), - []byte("ftp://"), - []byte("mailto:"), -} - -var Paths = [][]byte{ - []byte("/"), - []byte("./"), - []byte("../"), -} - -// IsSafeURL returns true if url starts with one of the valid schemes or is a relative path. -func IsSafeURL(url []byte) bool { - nLink := len(url) - for _, path := range Paths { - nPath := len(path) - linkPrefix := url[:nPath] - if nLink >= nPath && bytes.Equal(linkPrefix, path) { - if nLink == nPath { - return true - } else if IsAlnum(url[nPath]) { - return true - } - } - } - - for _, prefix := range URIs { - // TODO: handle unicode here - // case-insensitive prefix test - nPrefix := len(prefix) - if nLink > nPrefix { - linkPrefix := bytes.ToLower(url[:nPrefix]) - if bytes.Equal(linkPrefix, prefix) && IsAlnum(url[nPrefix]) { - return true - } - } - } - - return false -} - -// TODO: this is not used -// Replace tab characters with spaces, aligning to the next TAB_SIZE column. 
-// always ends output with a newline -/* -func expandTabs(out *bytes.Buffer, line []byte, tabSize int) { - // first, check for common cases: no tabs, or only tabs at beginning of line - i, prefix := 0, 0 - slowcase := false - for i = 0; i < len(line); i++ { - if line[i] == '\t' { - if prefix == i { - prefix++ - } else { - slowcase = true - break - } - } - } - - // no need to decode runes if all tabs are at the beginning of the line - if !slowcase { - for i = 0; i < prefix*tabSize; i++ { - out.WriteByte(' ') - } - out.Write(line[prefix:]) - return - } - - // the slow case: we need to count runes to figure out how - // many spaces to insert for each tab - column := 0 - i = 0 - for i < len(line) { - start := i - for i < len(line) && line[i] != '\t' { - _, size := utf8.DecodeRune(line[i:]) - i += size - column++ - } - - if i > start { - out.Write(line[start:i]) - } - - if i >= len(line) { - break - } - - for { - out.WriteByte(' ') - column++ - if column%tabSize == 0 { - break - } - } - - i++ - } -} -*/ - -// Find if a line counts as indented or not. -// Returns number of characters the indent is (0 = not indented). -func isIndented(data []byte, indentSize int) int { - if len(data) == 0 { - return 0 - } - if data[0] == '\t' { - return 1 - } - if len(data) < indentSize { - return 0 - } - for i := 0; i < indentSize; i++ { - if data[i] != ' ' { - return 0 - } - } - return indentSize -} - -// Create a url-safe slug for fragments -func slugify(in []byte) []byte { - if len(in) == 0 { - return in - } - out := make([]byte, 0, len(in)) - sym := false - - for _, ch := range in { - if IsAlnum(ch) { - sym = false - out = append(out, ch) - } else if sym { - continue - } else { - out = append(out, '-') - sym = true - } - } - var a, b int - var ch byte - for a, ch = range out { - if ch != '-' { - break - } - } - for b = len(out) - 1; b > 0; b-- { - if out[b] != '-' { - break - } - } - return out[a : b+1] -} - -func isListItem(d ast.Node) bool { - _, ok := d.(*ast.ListItem) - return ok -} - -func NormalizeNewlines(d []byte) []byte { - res := make([]byte, len(d)) - copy(res, d) - d = res - wi := 0 - n := len(d) - for i := 0; i < n; i++ { - c := d[i] - // 13 is CR - if c != 13 { - d[wi] = c - wi++ - continue - } - // replace CR (mac / win) with LF (unix) - d[wi] = 10 - wi++ - if i < n-1 && d[i+1] == 10 { - // this was CRLF, so skip the LF - i++ - } - - } - return d[:wi] -} diff --git a/vendor/github.com/gomarkdown/markdown/parser/ref.go b/vendor/github.com/gomarkdown/markdown/parser/ref.go deleted file mode 100644 index c1e053417b..0000000000 --- a/vendor/github.com/gomarkdown/markdown/parser/ref.go +++ /dev/null @@ -1,104 +0,0 @@ -package parser - -import ( - "bytes" - "fmt" - - "github.com/gomarkdown/markdown/ast" -) - -// parse '(#r, text)', where r does not contain spaces, but text may (similar to a citation). Or. (!item) (!item, -// subitem), for an index, (!!item) signals primary. 
-func maybeShortRefOrIndex(p *Parser, data []byte, offset int) (int, ast.Node) { - if len(data[offset:]) < 4 { - return 0, nil - } - // short ref first - data = data[offset:] - i := 1 - switch data[i] { - case '#': // cross ref - i++ - Loop: - for i < len(data) { - c := data[i] - switch { - case c == ')': - break Loop - case !IsAlnum(c): - if c == '_' || c == '-' || c == ':' || c == ' ' || c == ',' { - i++ - continue - } - i = 0 - break Loop - } - i++ - } - if i >= len(data) { - return 0, nil - } - if data[i] != ')' { - return 0, nil - } - - id := data[2:i] - node := &ast.CrossReference{} - node.Destination = id - if c := bytes.Index(id, []byte(",")); c > 0 { - idpart := id[:c] - suff := id[c+1:] - suff = bytes.TrimSpace(suff) - node.Destination = idpart - node.Suffix = suff - } - if bytes.Index(node.Destination, []byte(" ")) > 0 { - // no spaces allowed in id - return 0, nil - } - if bytes.Index(node.Destination, []byte(",")) > 0 { - // nor comma - return 0, nil - } - - return i + 1, node - - case '!': // index - i++ - start := i - i = skipUntilChar(data, start, ')') - - // did we reach the end of the buffer without a closing marker? - if i >= len(data) { - return 0, nil - } - - if len(data[start:i]) < 1 { - return 0, nil - } - - idx := &ast.Index{} - - idx.ID = fmt.Sprintf("idxref:%d", p.indexCnt) - p.indexCnt++ - - idx.Primary = data[start] == '!' - buf := data[start:i] - - if idx.Primary { - buf = buf[1:] - } - items := bytes.Split(buf, []byte(",")) - switch len(items) { - case 1: - idx.Item = bytes.TrimSpace(items[0]) - return i + 1, idx - case 2: - idx.Item = bytes.TrimSpace(items[0]) - idx.Subitem = bytes.TrimSpace(items[1]) - return i + 1, idx - } - } - - return 0, nil -} diff --git a/vendor/github.com/mmarkdown/mmark/LICENSE.txt b/vendor/github.com/mmarkdown/mmark/LICENSE.txt deleted file mode 100644 index 6880461027..0000000000 --- a/vendor/github.com/mmarkdown/mmark/LICENSE.txt +++ /dev/null @@ -1,31 +0,0 @@ -Markdown is distributed under the Simplified BSD License: - -Copyright © 2011 Russ Ross -Copyright © 2018 Krzysztof Kowalczyk -Copyright © 2018 Authors -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions -are met: - -1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following - disclaimer in the documentation and/or other materials provided with - the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS -FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE -COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, -BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN -ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -POSSIBILITY OF SUCH DAMAGE. 
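The hunks above delete the vendored gomarkdown parser from the tree. As a minimal, illustrative sketch of the API that goes away (the input document here is invented for the example, but `NewWithExtensions`, `Parse`, `ast.WalkFunc`, and the `AutoHeadingIDs` extension are all defined in the deleted sources shown above):

```go
package main

import (
	"fmt"

	"github.com/gomarkdown/markdown/ast"
	"github.com/gomarkdown/markdown/parser"
)

func main() {
	// Construct a parser; Parse returns the *ast.Document root.
	p := parser.NewWithExtensions(parser.CommonExtensions | parser.AutoHeadingIDs)
	doc := p.Parse([]byte("# Title\n\nSee [the docs][ref].\n\n[ref]: /url/ \"tooltip title\"\n"))

	// Walk the tree, using the same mechanism Parse relies on internally for
	// its inline and footnote passes; here we just list headings and their IDs.
	ast.WalkFunc(doc, func(node ast.Node, entering bool) ast.WalkStatus {
		if h, ok := node.(*ast.Heading); ok && entering {
			fmt.Printf("level %d heading, id %q\n", h.Level, h.HeadingID)
		}
		return ast.GoToNext
	})
}
```

Converting the tree to HTML or another output format is then a renderer's job via `markdown.Render(doc, renderer)`, as noted in the deleted `New` doc comment.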
diff --git a/vendor/github.com/mmarkdown/mmark/mast/bibliography.go b/vendor/github.com/mmarkdown/mmark/mast/bibliography.go deleted file mode 100644 index 9f1167b173..0000000000 --- a/vendor/github.com/mmarkdown/mmark/mast/bibliography.go +++ /dev/null @@ -1,23 +0,0 @@ -package mast - -import ( - "github.com/gomarkdown/markdown/ast" - "github.com/mmarkdown/mmark/mast/reference" -) - -// Bibliography represents markdown bibliography node. -type Bibliography struct { - ast.Container - - Type ast.CitationTypes -} - -// BibliographyItem contains a single bibliography item. -type BibliographyItem struct { - ast.Leaf - - Anchor []byte - Type ast.CitationTypes - - Reference *reference.Reference // parsed reference XML -} diff --git a/vendor/github.com/mmarkdown/mmark/mast/index.go b/vendor/github.com/mmarkdown/mmark/mast/index.go deleted file mode 100644 index 6052ac6fd8..0000000000 --- a/vendor/github.com/mmarkdown/mmark/mast/index.go +++ /dev/null @@ -1,34 +0,0 @@ -package mast - -import "github.com/gomarkdown/markdown/ast" - -// DocumentIndex represents markdown document index node. -type DocumentIndex struct { - ast.Container -} - -// IndexItem contains an index for the indices section. -type IndexItem struct { - ast.Container - - *ast.Index -} - -// IndexSubItem contains an sub item index for the indices section. -type IndexSubItem struct { - ast.Container - - *ast.Index -} - -// IndexLetter has the Letter of this index item. -type IndexLetter struct { - ast.Container -} - -// IndexLink links to the index in the document. -type IndexLink struct { - *ast.Link - - Primary bool -} diff --git a/vendor/github.com/mmarkdown/mmark/mast/nodes.go b/vendor/github.com/mmarkdown/mmark/mast/nodes.go deleted file mode 100644 index 24ad9890b8..0000000000 --- a/vendor/github.com/mmarkdown/mmark/mast/nodes.go +++ /dev/null @@ -1,171 +0,0 @@ -package mast - -import ( - "bytes" - "sort" - - "github.com/gomarkdown/markdown/ast" -) - -// some extra functions for manipulation the AST - -// MoveChilderen moves the children from a to b *and* make the parent of each point to b. -// Any children of b are obliterated. -func MoveChildren(a, b ast.Node) { - a.SetChildren(b.GetChildren()) - b.SetChildren(nil) - - for _, child := range a.GetChildren() { - child.SetParent(a) - } -} - -// Some attribute helper functions. - -// AttributeFromNode returns the attribute from the node, if it was there was one. -func AttributeFromNode(node ast.Node) *ast.Attribute { - if c := node.AsContainer(); c != nil && c.Attribute != nil { - return c.Attribute - } - if l := node.AsLeaf(); l != nil && l.Attribute != nil { - return l.Attribute - } - return nil -} - -// AttributeInit will initialize an *Attribute on node if there wasn't one. -func AttributeInit(node ast.Node) { - if l := node.AsLeaf(); l != nil && l.Attribute == nil { - l.Attribute = &ast.Attribute{Attrs: make(map[string][]byte)} - return - } - if c := node.AsContainer(); c != nil && c.Attribute == nil { - c.Attribute = &ast.Attribute{Attrs: make(map[string][]byte)} - return - } -} - -// DeleteAttribute delete the attribute under key from a. -func DeleteAttribute(node ast.Node, key string) { - a := AttributeFromNode(node) - if a == nil { - return - } - - switch key { - case "id": - a.ID = nil - case "class": - // TODO - default: - delete(a.Attrs, key) - } -} - -// SetAttribute sets the attribute under key to value. 
-func SetAttribute(node ast.Node, key string, value []byte) {
-	a := AttributeFromNode(node)
-	if a == nil {
-		return
-	}
-	switch key {
-	case "id":
-		a.ID = value
-	case "class":
-		// TODO
-	default:
-		a.Attrs[key] = value
-	}
-}
-
-// Attribute returns the attribute value under key. Use AttributeClass to retrieve
-// a class.
-func Attribute(node ast.Node, key string) []byte {
-	a := AttributeFromNode(node)
-	if a == nil {
-		return nil
-	}
-	switch key {
-	case "id":
-		return a.ID
-	case "class":
-		// use AttributeClass.
-	}
-
-	return a.Attrs[key]
-}
-
-func AttributeBytes(attr *ast.Attribute) []byte {
-	ret := &bytes.Buffer{}
-	ret.WriteByte('{')
-	if len(attr.ID) != 0 {
-		ret.WriteByte('#')
-		ret.Write(attr.ID)
-	}
-	for _, c := range attr.Classes {
-		if ret.Len() > 1 {
-			ret.WriteByte(' ')
-		}
-		ret.WriteByte('.')
-		ret.Write(c)
-	}
-
-	keys := []string{}
-	for k := range attr.Attrs {
-		keys = append(keys, k)
-	}
-	sort.Strings(keys)
-
-	for _, k := range keys {
-		if ret.Len() > 1 {
-			ret.WriteByte(' ')
-		}
-		ret.WriteString(k)
-		ret.WriteString(`="`)
-		ret.Write(attr.Attrs[k])
-		ret.WriteByte('"')
-	}
-	ret.WriteByte('}')
-	return ret.Bytes()
-}
-
-// AttributeClass returns true if the class key is set.
-func AttributeClass(node ast.Node, key string) bool {
-	a := AttributeFromNode(node)
-	if a == nil {
-		return false
-	}
-	for _, c := range a.Classes {
-		if string(c) == key {
-			return true
-		}
-	}
-	return false
-}
-
-// AttributeFilter runs the attribute on node through filter and only allows elements for which filter returns true.
-func AttributeFilter(node ast.Node, filter func(key string) bool) {
-	a := AttributeFromNode(node)
-	if a == nil {
-		return
-	}
-	if !filter("id") {
-		a.ID = nil
-	}
-	if !filter("class") {
-		a.Classes = nil
-	}
-	for k := range a.Attrs {
-		if !filter(k) {
-			delete(a.Attrs, k)
-		}
-	}
-}
-
-// FilterFunc checks if s is an allowed key in an attribute.
-// If s is:
-//	"id" the ID should be checked
-//	"class" the classes should be allowed or disallowed
-//	any other string means checking the individual attributes.
-// It returns true for elements that are allowed, false otherwise.
-type FilterFunc func(s string) bool
diff --git a/vendor/github.com/mmarkdown/mmark/mast/reference/reference.go b/vendor/github.com/mmarkdown/mmark/mast/reference/reference.go
deleted file mode 100644
index cefed43327..0000000000
--- a/vendor/github.com/mmarkdown/mmark/mast/reference/reference.go
+++ /dev/null
@@ -1,71 +0,0 @@
-// Package reference defines the elements of a <reference> block.
-package reference
-
-import "encoding/xml"
-
-// Author is the reference author.
-type Author struct {
-	Fullname     string        `xml:"fullname,attr,omitempty"`
-	Initials     string        `xml:"initials,attr,omitempty"`
-	Surname      string        `xml:"surname,attr,omitempty"`
-	Role         string        `xml:"role,attr,omitempty"`
-	Organization *Organization `xml:"organization,omitempty"`
-	Address      *Address      `xml:"address,omitempty"`
-}
-
-type Organization struct {
-	Abbrev string `xml:"abbrev,attr,omitempty"`
-	Value  string `xml:",chardata"`
-}
-
-// this is copied from ../title.go; it might make sense to unify them both,
-// especially if we want to allow a reference to be given in TOML as well. See #55.
-// Author denotes an RFC author.
-
-// Address denotes the address of an RFC author.
-type Address struct {
-	Phone  string        `xml:"phone,omitempty"`
-	Email  string        `xml:"email,omitempty"`
-	URI    string        `xml:"uri,omitempty"`
-	Postal AddressPostal `xml:"postal,omitempty"`
-}
-
-// AddressPostal denotes the postal address of an RFC author.
-type AddressPostal struct {
-	PostalLine []string `xml:"postalline,omitempty"`
-
-	Streets   []string `xml:"street,omitempty"`
-	Cities    []string `xml:"city,omitempty"`
-	Codes     []string `xml:"code,omitempty"`
-	Countries []string `xml:"country,omitempty"`
-	Regions   []string `xml:"region,omitempty"`
-}
-
-// Date is the reference date.
-type Date struct {
-	Year  string `xml:"year,attr,omitempty"`
-	Month string `xml:"month,attr,omitempty"`
-	Day   string `xml:"day,attr,omitempty"`
-}
-
-// Front is the reference <front>.
-type Front struct {
-	Title   string   `xml:"title"`
-	Authors []Author `xml:"author,omitempty"`
-	Date    Date     `xml:"date"`
-}
-
-// Format is the reference <format>. This is deprecated in RFC 7991, see Section 3.3.
-type Format struct {
-	Type   string `xml:"type,attr,omitempty"`
-	Target string `xml:"target,attr"`
-}
-
-// Reference is the entire <reference> structure.
-type Reference struct {
-	XMLName xml.Name `xml:"reference"`
-	Anchor  string   `xml:"anchor,attr"`
-	Front   Front    `xml:"front"`
-	Format  *Format  `xml:"format,omitempty"`
-	Target  string   `xml:"target,attr"`
-}
diff --git a/vendor/github.com/mmarkdown/mmark/mast/title.go b/vendor/github.com/mmarkdown/mmark/mast/title.go
deleted file mode 100644
index 4bcaffc57d..0000000000
--- a/vendor/github.com/mmarkdown/mmark/mast/title.go
+++ /dev/null
@@ -1,95 +0,0 @@
-package mast
-
-import (
-	"time"
-
-	"github.com/gomarkdown/markdown/ast"
-)
-
-// Title represents the TOML encoded title block.
-type Title struct {
-	ast.Leaf
-	*TitleData
-	Trigger string // either triggered by %%% or ---
-}
-
-// NewTitle returns a pointer to Title with some defaults set.
-func NewTitle(trigger byte) *Title {
-	t := &Title{
-		TitleData: &TitleData{
-			Area:      "Internet",
-			Ipr:       "trust200902",
-			Consensus: true,
-		},
-	}
-	t.Trigger = string([]byte{trigger, trigger, trigger})
-	return t
-}
-
-const triggerDash = "---"
-
-func (t *Title) IsTriggerDash() bool { return t.Trigger == triggerDash }
-
-// TitleData holds all the elements of the title.
-type TitleData struct {
-	Title  string
-	Abbrev string
-
-	SeriesInfo     SeriesInfo
-	Consensus      bool
-	Ipr            string // See https://tools.ietf.org/html/rfc7991#appendix-A.1
-	Obsoletes      []int
-	Updates        []int
-	SubmissionType string // IETF, IAB, IRTF or independent
-
-	Date      time.Time
-	Area      string
-	Workgroup string
-	Keyword   []string
-	Author    []Author
-}
-
-// SeriesInfo holds details on the Internet-Draft or RFC, see https://tools.ietf.org/html/rfc7991#section-2.47
-type SeriesInfo struct {
-	Name   string // name of the document, values are "RFC", "Internet-Draft", and "DOI"
-	Value  string // either draft name, or number
-	Status string // The status of this document, values: "standard", "informational", "experimental", "bcp", "fyi", and "full-standard"
-	Stream string // "IETF" (default), "IAB", "IRTF" or "independent"
-}
-
-// Author denotes an RFC author.
-type Author struct {
-	Initials           string
-	Surname            string
-	Fullname           string
-	Organization       string
-	OrganizationAbbrev string `toml:"abbrev"`
-	Role               string
-	ASCII              string
-	Address            Address
-}
-
-// Address denotes the address of an RFC author.
-type Address struct {
-	Phone  string
-	Email  string
-	URI    string
-	Postal AddressPostal
-}
-
-// AddressPostal denotes the postal address of an RFC author.
-type AddressPostal struct {
-	Street  string
-	City    string
-	Code    string
-	Country string
-	Region  string
-	PostalLine []string
-
-	// Plurals when these need to be specified multiple times.
- Streets []string - Cities []string - Codes []string - Countries []string - Regions []string -} diff --git a/vendor/github.com/mmarkdown/mmark/mparser/bibliography.go b/vendor/github.com/mmarkdown/mmark/mparser/bibliography.go deleted file mode 100644 index c1275b6d8e..0000000000 --- a/vendor/github.com/mmarkdown/mmark/mparser/bibliography.go +++ /dev/null @@ -1,184 +0,0 @@ -package mparser - -import ( - "bytes" - "encoding/xml" - "log" - - "github.com/gomarkdown/markdown/ast" - "github.com/mmarkdown/mmark/mast" - "github.com/mmarkdown/mmark/mast/reference" -) - -// CitationToBibliography walks the AST and gets all the citations on HTML blocks and groups them into -// normative and informative references. -func CitationToBibliography(doc ast.Node) (normative ast.Node, informative ast.Node) { - seen := map[string]*mast.BibliographyItem{} - raw := map[string][]byte{} - - // Gather all citations. - // Gather all reference HTML Blocks to see if we have XML we can output. - ast.WalkFunc(doc, func(node ast.Node, entering bool) ast.WalkStatus { - switch c := node.(type) { - case *ast.Citation: - for i, d := range c.Destination { - if _, ok := seen[string(bytes.ToLower(d))]; ok { - continue - } - ref := &mast.BibliographyItem{} - ref.Anchor = d - ref.Type = c.Type[i] - - seen[string(d)] = ref - } - case *ast.HTMLBlock: - anchor := anchorFromReference(c.Literal) - if anchor != nil { - raw[string(bytes.ToLower(anchor))] = c.Literal - } - } - return ast.GoToNext - }) - - for _, r := range seen { - // If we have a reference anchor and the raw XML add that here. - if raw, ok := raw[string(bytes.ToLower(r.Anchor))]; ok { - var x reference.Reference - if e := xml.Unmarshal(raw, &x); e != nil { - log.Printf("Failed to unmarshal reference: %q: %s", r.Anchor, e) - continue - } - r.Reference = &x - } - - switch r.Type { - case ast.CitationTypeInformative: - if informative == nil { - informative = &mast.Bibliography{Type: ast.CitationTypeInformative} - } - - ast.AppendChild(informative, r) - case ast.CitationTypeSuppressed: - fallthrough - case ast.CitationTypeNormative: - if normative == nil { - normative = &mast.Bibliography{Type: ast.CitationTypeNormative} - } - ast.AppendChild(normative, r) - } - } - return normative, informative -} - -// NodeBackMatter is the place where we should inject the bibliography -func NodeBackMatter(doc ast.Node) ast.Node { - var matter ast.Node - - ast.WalkFunc(doc, func(node ast.Node, entering bool) ast.WalkStatus { - if mat, ok := node.(*ast.DocumentMatter); ok { - if mat.Matter == ast.DocumentMatterBack { - matter = mat - return ast.Terminate - } - } - return ast.GoToNext - }) - return matter -} - -// Parse '' and return the string after anchor= is the ID for the reference. -func anchorFromReference(data []byte) []byte { - if !bytes.HasPrefix(data, []byte("') { - i++ - } - - // no end-of-reference marker - if i > len(data) { - return nil, false - } - return data[:i], true -} - -func fmtReference(data []byte) []byte { - var x reference.Reference - if e := xml.Unmarshal(data, &x); e != nil { - return data - } - - out, e := xml.MarshalIndent(x, "", " ") - if e != nil { - return data - } - return out -} - -// AddBibliography adds the bibliography to the document. It will be -// added just after the backmatter node. If that node can't be found this -// function returns false and does nothing. 
-func AddBibliography(doc ast.Node) bool { - where := NodeBackMatter(doc) - if where == nil { - return false - } - - norm, inform := CitationToBibliography(doc) - if norm != nil { - ast.AppendChild(where, norm) - } - if inform != nil { - ast.AppendChild(where, inform) - } - return (norm != nil) || (inform != nil) -} diff --git a/vendor/github.com/mmarkdown/mmark/mparser/extensions.go b/vendor/github.com/mmarkdown/mmark/mparser/extensions.go deleted file mode 100644 index af25f5d11e..0000000000 --- a/vendor/github.com/mmarkdown/mmark/mparser/extensions.go +++ /dev/null @@ -1,11 +0,0 @@ -package mparser - -import ( - "github.com/gomarkdown/markdown/parser" -) - -// Extensions is the default set of extensions mmark requires. -var Extensions = parser.Tables | parser.FencedCode | parser.Autolink | parser.Strikethrough | - parser.SpaceHeadings | parser.HeadingIDs | parser.BackslashLineBreak | parser.SuperSubscript | - parser.DefinitionLists | parser.MathJax | parser.AutoHeadingIDs | parser.Footnotes | - parser.Strikethrough | parser.OrderedListStart | parser.Attributes | parser.Mmark | parser.Includes diff --git a/vendor/github.com/mmarkdown/mmark/mparser/hook.go b/vendor/github.com/mmarkdown/mmark/mparser/hook.go deleted file mode 100644 index 40b16798ee..0000000000 --- a/vendor/github.com/mmarkdown/mmark/mparser/hook.go +++ /dev/null @@ -1,59 +0,0 @@ -package mparser - -import ( - "io/ioutil" - "log" - "path/filepath" - - "github.com/gomarkdown/markdown/ast" - "github.com/gomarkdown/markdown/parser" -) - -var UnsafeInclude parser.Flags = 1 << 3 - -// Hook will call both TitleHook and ReferenceHook. -func Hook(data []byte) (ast.Node, []byte, int) { - n, b, i := TitleHook(data) - if n != nil { - return n, b, i - } - - return ReferenceHook(data) -} - -// ReadInclude is the hook to read includes. -// Its supports the following options for address. -// -// 4,5 - line numbers separated by commas -// N, - line numbers, end not specified, read until the end. -// /start/,/end/ - regexp separated by commas -// optional a prefix="" string. -func (i Initial) ReadInclude(from, file string, address []byte) []byte { - path := i.path(from, file) - - if i.Flags&UnsafeInclude == 0 { - if ok := i.pathAllowed(path); !ok { - log.Printf("Failure to read: %q: path is not on or below %q", path, i.i) - return nil - } - } - - data, err := ioutil.ReadFile(path) - if err != nil { - log.Printf("Failure to read: %q (from %q)", err, filepath.Join(from, "*")) - return nil - } - - data, err = parseAddress(address, data) - if err != nil { - log.Printf("Failure to parse address for %q: %q (from %q)", path, err, filepath.Join(from, "*")) - return nil - } - if len(data) == 0 { - return data - } - if data[len(data)-1] != '\n' { - data = append(data, '\n') - } - return data -} diff --git a/vendor/github.com/mmarkdown/mmark/mparser/include.go b/vendor/github.com/mmarkdown/mmark/mparser/include.go deleted file mode 100644 index 8737def73b..0000000000 --- a/vendor/github.com/mmarkdown/mmark/mparser/include.go +++ /dev/null @@ -1,251 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Adapted for mmark, by Miek Gieben, 2015. -// Adapted for mmark2 (fastly simplified and features removed), 2018. 
- -package mparser - -import ( - "bytes" - "errors" - "fmt" - "os" - "path" - "path/filepath" - "regexp" - "strconv" - "strings" - - "github.com/gomarkdown/markdown/parser" -) - -// Initial is the initial file we are working on, empty for stdin and adjusted is we we have an absolute or relative file. -type Initial struct { - Flags parser.Flags - i string -} - -// NewInitial returns an initialized Initial. -func NewInitial(s string) Initial { - if path.IsAbs(s) { - return Initial{i: path.Dir(s)} - } - - cwd, _ := os.Getwd() - if s == "" { - return Initial{i: cwd} - } - return Initial{i: path.Dir(filepath.Join(cwd, s))} -} - -// path returns the full path we should use according to from, file and initial. -func (i Initial) path(from, file string) string { - if path.IsAbs(file) { - return file - } - if path.IsAbs(from) { - filepath.Join(from, file) - } - - f1 := filepath.Join(i.i, from) - - return filepath.Join(f1, file) -} - -// pathAllowed returns true is file is on the same level or below the initial file. -func (i Initial) pathAllowed(file string) bool { - x, err := filepath.Rel(i.i, file) - if err != nil { - return false - } - return !strings.Contains(x, "..") -} - -// parseAddress parses a code address directive and returns the bytes or an error. -func parseAddress(addr []byte, data []byte) ([]byte, error) { - bytes.TrimSpace(addr) - - if len(addr) == 0 { - return data, nil - } - - // check for prefix, either as ;prefix, prefix; or just standalone prefix. - var prefix []byte - if x := bytes.Index(addr, []byte("prefix=")); x >= 0 { - if x+1 > len(addr) { - return nil, fmt.Errorf("invalid prefix in address specification: %s", addr) - } - start := x + len("prefix=") - quote := addr[start] - if quote != '\'' && quote != '"' { - return nil, fmt.Errorf("invalid prefix in address specification: %s", addr) - } - - end := SkipUntilChar(addr, start+1, quote) - prefix = addr[start+1 : end] - if len(prefix) == 0 { - return nil, fmt.Errorf("invalid prefix in address specification: %s", addr) - } - - addr = append(addr[:x], addr[end+1:]...) - addr = bytes.Replace(addr, []byte(";"), []byte(""), 1) - if len(addr) == 0 { - data = addPrefix(data, prefix) - return data, nil - } - } - - lo, hi, err := addrToByteRange(addr, data) - if err != nil { - return nil, err - } - - // Acme pattern matches can stop mid-line, - // so run to end of line in both directions if not at line start/end. - for lo > 0 && data[lo-1] != '\n' { - lo-- - } - if hi > 0 { - for hi < len(data) && data[hi-1] != '\n' { - hi++ - } - } - - data = data[lo:hi] - if prefix != nil { - data = addPrefix(data, prefix) - } - return data, nil -} - -// addrToByteRange evaluates the given address. It returns the start and end index of the data we should return. -// Supported syntax: N, M or /start/, /end/ . 
-func addrToByteRange(addr, data []byte) (lo, hi int, err error) { - chunk := bytes.Split(addr, []byte(",")) - if len(chunk) != 2 { - return 0, 0, fmt.Errorf("invalid address specification: %s", addr) - } - left := bytes.TrimSpace(chunk[0]) - right := bytes.TrimSpace(chunk[1]) - - if len(left) == 0 { - return 0, 0, fmt.Errorf("invalid address specification: %s", addr) - } - if len(right) == 0 { - // open ended right term - } - - if left[0] == '/' { //regular expression - if left[len(left)-1] != '/' { - return 0, 0, fmt.Errorf("invalid address specification: %s", addr) - } - if right[0] != '/' { - return 0, 0, fmt.Errorf("invalid address specification: %s", addr) - } - if right[len(right)-1] != '/' { - return 0, 0, fmt.Errorf("invalid address specification: %s", addr) - } - - lo, hi, err = addrRegexp(data, string(left[1:len(left)-1]), string(right[1:len(right)-1])) - if err != nil { - return 0, 0, err - } - } else { - lo, err = strconv.Atoi(string(left)) - if err != nil { - return 0, 0, err - } - i, j := 0, 0 - for i < len(data) { - if data[i] == '\n' { - j++ - if j >= lo { - break - } - } - i++ - } - lo = i - - if len(right) == 0 { - hi = len(data) - goto End - } - - hi, err = strconv.Atoi(string(right)) - if err != nil { - return 0, 0, err - } - i, j = 0, 0 - for i < len(data) { - if data[i] == '\n' { - j++ - if j+1 >= hi { - break - } - } - i++ - } - hi = i - } - -End: - if lo > hi { - return 0, 0, fmt.Errorf("invalid address specification: %s", addr) - } - - return lo, hi, nil -} - -// addrRegexp searches for pattern start and pattern end -func addrRegexp(data []byte, start, end string) (int, int, error) { - start = "(?m:" + start + ")" // match through newlines - reStart, err := regexp.Compile(start) - if err != nil { - return 0, 0, err - } - - end = "(?m:" + end + ")" - reEnd, err := regexp.Compile(end) - if err != nil { - return 0, 0, err - } - m := reStart.FindIndex(data) - if len(m) == 0 { - return 0, 0, errors.New("no match for " + start) - } - lo := m[0] - - m = reEnd.FindIndex(data[lo:]) // start *from* lo - if len(m) == 0 { - return 0, 0, errors.New("no match for " + end) - } - hi := m[0] - - return lo, hi, nil -} - -func SkipUntilChar(data []byte, i int, c byte) int { - n := len(data) - for i < n && data[i] != c { - i++ - } - return i -} - -func addPrefix(data, prefix []byte) []byte { - b := &bytes.Buffer{} - b.Write(prefix) - // assured that data ends in newline - i := 0 - for i < len(data)-1 { - b.WriteByte(data[i]) - if data[i] == '\n' { - b.Write(prefix) - } - i++ - } - return b.Bytes() -} diff --git a/vendor/github.com/mmarkdown/mmark/mparser/index.go b/vendor/github.com/mmarkdown/mmark/mparser/index.go deleted file mode 100644 index 72b39c0594..0000000000 --- a/vendor/github.com/mmarkdown/mmark/mparser/index.go +++ /dev/null @@ -1,111 +0,0 @@ -package mparser - -import ( - "bytes" - "fmt" - "sort" - - "github.com/gomarkdown/markdown/ast" - "github.com/mmarkdown/mmark/mast" -) - -// IndexToDocumentIndex crawls the entire doc searching for indices, it will then return -// an mast.DocumentIndex that contains a tree: -// -// IndexLetter -// - IndexItem -// - IndexLink -// - IndexSubItem -// - IndexLink -// - IndexLink -// -// Which can then be rendered by the renderer. -func IndexToDocumentIndex(doc ast.Node) *mast.DocumentIndex { - main := map[string]*mast.IndexItem{} - subitem := map[string][]*mast.IndexSubItem{} // gather these so we can add them in one swoop at the end - - // Gather all indexes. 
- ast.WalkFunc(doc, func(node ast.Node, entering bool) ast.WalkStatus { - switch i := node.(type) { - case *ast.Index: - item := string(i.Item) - - if _, ok := main[item]; !ok { - main[item] = &mast.IndexItem{Index: i} - } - // only the main item - if i.Subitem == nil { - ast.AppendChild(main[item], newLink(i.ID, len(main[item].GetChildren()), i.Primary)) - return ast.GoToNext - } - // check if we already have a child with the subitem and then just add the link - for _, sub := range subitem[item] { - if bytes.Compare(sub.Subitem, i.Subitem) == 0 { - ast.AppendChild(sub, newLink(i.ID, len(sub.GetChildren()), i.Primary)) - return ast.GoToNext - } - } - - sub := &mast.IndexSubItem{Index: i} - ast.AppendChild(sub, newLink(i.ID, len(subitem[item]), i.Primary)) - subitem[item] = append(subitem[item], sub) - } - return ast.GoToNext - }) - if len(main) == 0 { - return nil - } - - // Now add a subitem children to the correct main item. - for k, sub := range subitem { - // sort sub here ideally - for j := range sub { - ast.AppendChild(main[k], sub[j]) - } - } - - keys := []string{} - for k := range main { - keys = append(keys, k) - } - sort.Strings(keys) - - letters := []*mast.IndexLetter{} - var prevLetter byte - var il *mast.IndexLetter - for _, k := range keys { - letter := k[0] - if letter != prevLetter { - il = &mast.IndexLetter{} - il.Literal = []byte{letter} - letters = append(letters, il) - } - ast.AppendChild(il, main[k]) - prevLetter = letter - } - docIndex := &mast.DocumentIndex{} - for i := range letters { - ast.AppendChild(docIndex, letters[i]) - } - - return docIndex -} - -func newLink(id string, number int, primary bool) *mast.IndexLink { - link := &ast.Link{Destination: []byte(id)} - il := &mast.IndexLink{Link: link, Primary: primary} - il.Literal = []byte(fmt.Sprintf("%d", number)) - return il -} - -// AddIndex adds an index to the end of the current document. If not indices can be found -// this returns false and no index will be added. -func AddIndex(doc ast.Node) bool { - idx := IndexToDocumentIndex(doc) - if idx == nil { - return false - } - - ast.AppendChild(doc, idx) - return true -} diff --git a/vendor/github.com/mmarkdown/mmark/mparser/title.go b/vendor/github.com/mmarkdown/mmark/mparser/title.go deleted file mode 100644 index 3c5f72b718..0000000000 --- a/vendor/github.com/mmarkdown/mmark/mparser/title.go +++ /dev/null @@ -1,57 +0,0 @@ -package mparser - -import ( - "log" - - "github.com/BurntSushi/toml" - "github.com/gomarkdown/markdown/ast" - "github.com/mmarkdown/mmark/mast" -) - -// TitleHook will parse a title and returns it. The start and ending can -// be signalled with %%% or --- (the later to more inline with Hugo and other markdown dialects. -func TitleHook(data []byte) (ast.Node, []byte, int) { - i := 0 - if len(data) < 3 { - return nil, nil, 0 - } - - c := data[i] // first char can either be % or - - if c != '%' && c != '-' { - return nil, nil, 0 - } - - if data[i] != c || data[i+1] != c || data[i+2] != c { - return nil, nil, 0 - } - - i += 3 - beg := i - found := false - // search for end. 
- for i < len(data) { - if data[i] == c && data[i+1] == c && data[i+2] == c { - found = true - break - } - i++ - } - if !found { - return nil, nil, 0 - } - - node := mast.NewTitle(c) - buf := data[beg:i] - - if c == '-' { - node.Content = buf - return node, nil, i + 3 - } - - if _, err := toml.Decode(string(buf), node.TitleData); err != nil { - log.Printf("Failure parsing title block: %s", err) - } - node.Content = buf - - return node, nil, i + 3 -} diff --git a/vendor/modules.txt b/vendor/modules.txt index ced470b865..b9cb56c182 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -131,10 +131,6 @@ github.com/Azure/go-autorest/logger # github.com/Azure/go-autorest/tracing v0.6.0 ## explicit; go 1.12 github.com/Azure/go-autorest/tracing -# github.com/BurntSushi/toml v1.3.2 -## explicit; go 1.16 -github.com/BurntSushi/toml -github.com/BurntSushi/toml/internal # github.com/Microsoft/go-winio v0.6.2 ## explicit; go 1.21 github.com/Microsoft/go-winio @@ -568,11 +564,6 @@ github.com/golang/protobuf/proto # github.com/golang/snappy v0.0.4 ## explicit github.com/golang/snappy -# github.com/gomarkdown/markdown v0.0.0-20240328165702-4d01890c35c0 -## explicit; go 1.12 -github.com/gomarkdown/markdown/ast -github.com/gomarkdown/markdown/html -github.com/gomarkdown/markdown/parser # github.com/google/certificate-transparency-go v1.1.8 ## explicit; go 1.21 github.com/google/certificate-transparency-go @@ -766,11 +757,6 @@ github.com/mitchellh/go-wordwrap # github.com/mitchellh/mapstructure v1.5.0 ## explicit; go 1.14 github.com/mitchellh/mapstructure -# github.com/mmarkdown/mmark v2.0.40+incompatible -## explicit -github.com/mmarkdown/mmark/mast -github.com/mmarkdown/mmark/mast/reference -github.com/mmarkdown/mmark/mparser # github.com/moby/sys/mountinfo v0.7.1 ## explicit; go 1.16 github.com/moby/sys/mountinfo @@ -2010,10 +1996,6 @@ sigs.k8s.io/gateway-api/apis/v1 ## explicit; go 1.18 sigs.k8s.io/json sigs.k8s.io/json/internal/golang/encoding/json -# sigs.k8s.io/mdtoc v1.3.0 -## explicit; go 1.21 -sigs.k8s.io/mdtoc -sigs.k8s.io/mdtoc/pkg/mdtoc # sigs.k8s.io/release-utils v0.8.1 ## explicit; go 1.21 sigs.k8s.io/release-utils/command diff --git a/vendor/sigs.k8s.io/mdtoc/.gitignore b/vendor/sigs.k8s.io/mdtoc/.gitignore deleted file mode 100644 index 3664be5146..0000000000 --- a/vendor/sigs.k8s.io/mdtoc/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -./mdtoc -coverage* -dist/ -output/ diff --git a/vendor/sigs.k8s.io/mdtoc/.golangci.yml b/vendor/sigs.k8s.io/mdtoc/.golangci.yml deleted file mode 100644 index 3c4abcd9a4..0000000000 --- a/vendor/sigs.k8s.io/mdtoc/.golangci.yml +++ /dev/null @@ -1,196 +0,0 @@ ---- -run: - concurrency: 6 - deadline: 5m -issues: -linters: - disable-all: true - enable: - - asasalint - - asciicheck - - bidichk - - bodyclose - - containedctx - - contextcheck - - copyloopvar - - decorder - - dogsled - - dupl - - dupword - - durationcheck - - errcheck - - errchkjson - - errname - - errorlint - - execinquery - - exhaustive - - exportloopref - - forcetypeassert - - funlen - - gci - - ginkgolinter - - gocheckcompilerdirectives - - gochecksumtype - - gocognit - - goconst - - gocritic - - gocyclo - - godot - - godox - - gofmt - - gofumpt - - goheader - - goimports - - gomoddirectives - - gomodguard - - goprintffuncname - - gosec - - gosimple - - gosmopolitan - - govet - - grouper - - importas - - inamedparam - - ineffassign - - interfacebloat - - intrange - - loggercheck - - maintidx - - makezero - - mirror - - misspell - - musttag - - nakedret - - nestif - - nilerr - - nilnil - - noctx 
- - nolintlint - - nosprintfhostport - - paralleltest - - perfsprint - - prealloc - - predeclared - - promlinter - - protogetter - - reassign - - revive - - rowserrcheck - - sloglint - - spancheck - - sqlclosecheck - - staticcheck - - stylecheck - - tagalign - - tagliatelle - - tenv - - testableexamples - - testifylint - - thelper - - tparallel - - typecheck - - unconvert - - unparam - - unused - - usestdlibvars - - wastedassign - - whitespace - - wrapcheck - - zerologlint - # - cyclop - # - depguard - # - exhaustruct - # - forbidigo - # - gochecknoglobals - # - gochecknoinits - # - goerr113 - # - gomnd - # - ireturn - # - lll - # - nlreturn - # - nonamedreturns - # - testpackage - # - varnamelen - # - wsl -linters-settings: - godox: - keywords: - - BUG - - FIXME - - HACK - errcheck: - check-type-assertions: true - check-blank: true - gocritic: - enabled-checks: - - appendCombine - - badLock - - badRegexp - - badSorting - - badSyncOnceFunc - - boolExprSimplify - - builtinShadow - - builtinShadowDecl - - commentedOutCode - - commentedOutImport - - deferInLoop - - deferUnlambda - - docStub - - dupImport - - dynamicFmtString - - emptyDecl - - emptyFallthrough - - emptyStringTest - - equalFold - - evalOrder - - exposedSyncMutex - - externalErrorReassign - - filepathJoin - - hexLiteral - - httpNoBody - - hugeParam - - importShadow - - indexAlloc - - initClause - - methodExprCall - - nestingReduce - - nilValReturn - - octalLiteral - - paramTypeCombine - - preferDecodeRune - - preferFilepathJoin - - preferFprint - - preferStringWriter - - preferWriteByte - - ptrToRefParam - - rangeExprCopy - - rangeValCopy - - redundantSprint - - regexpPattern - - regexpSimplify - - returnAfterHttpError - - ruleguard - - sliceClear - - sloppyReassign - - sortSlice - - sprintfQuotedString - - sqlQuery - - stringConcatSimplify - - stringXbytes - - stringsCompare - - syncMapLoadAndDelete - - timeExprSimplify - - todoCommentWithoutDetail - - tooManyResultsChecker - - truncateCmp - - typeAssertChain - - typeDefFirst - - typeUnparen - - uncheckedInlineErr - - unlabelStmt - - unnamedResult - - unnecessaryBlock - - unnecessaryDefer - - weakCond - - whyNoLint - - yodaStyleExpr diff --git a/vendor/sigs.k8s.io/mdtoc/.goreleaser.yml b/vendor/sigs.k8s.io/mdtoc/.goreleaser.yml deleted file mode 100644 index 13b3d4614b..0000000000 --- a/vendor/sigs.k8s.io/mdtoc/.goreleaser.yml +++ /dev/null @@ -1,82 +0,0 @@ -project_name: mdtoc - -env: - - CGO_ENABLED=0 - - COSIGN_YES=true - -before: - hooks: - - go mod tidy - # - /bin/bash -c 'if [ -n "$(git --no-pager diff --exit-code go.mod go.sum)" ]; then exit 1; fi' - -gomod: - proxy: true - -builds: - - id: mdtoc - dir: . 
- no_unique_dist_dir: true - binary: mdtoc-{{ .Arch }}-{{ .Os }} - goos: - - darwin - - linux - - windows - goarch: - - amd64 - - arm64 - - arm - goarm: - - '7' - ignore: - - goos: windows - goarch: arm - flags: - - -trimpath - ldflags: - - "{{ .Env.LDFLAGS }}" - -archives: - - format: binary - name_template: "{{ .Binary }}" - allow_different_binary_count: true - -signs: - # Keyless - - id: mdtoc-keyless - signature: "${artifact}.sig" - certificate: "${artifact}.pem" - cmd: cosign - args: ["sign-blob", "--output-signature", "${artifact}.sig", "--output-certificate", "${artifact}.pem", "${artifact}"] - artifacts: all - -sboms: - - id: mdtoc - cmd: bom - args: - - generate - - "--output" - - "mdtoc-bom.json.spdx" - - "-d" - - "../" - - "-c" - - "../.mdtoc-bom-config.yaml" - - "--format" - - "json" - artifacts: any - documents: - - "mdtoc-bom.json.spdx" - -checksum: - name_template: 'checksums.txt' - -snapshot: - name_template: "{{ .Tag }}-next" - -release: - github: - owner: kubernetes-sigs - name: mdtoc - prerelease: auto - -changelog: - disable: true diff --git a/vendor/sigs.k8s.io/mdtoc/.mdtoc-bom-config.yaml b/vendor/sigs.k8s.io/mdtoc/.mdtoc-bom-config.yaml deleted file mode 100644 index 1dcf3a1ad5..0000000000 --- a/vendor/sigs.k8s.io/mdtoc/.mdtoc-bom-config.yaml +++ /dev/null @@ -1,43 +0,0 @@ ---- -namespace: https://sigs.k8s.io/zeitgeist -license: Apache-2.0 -name: mdtoc -creator: - person: The Kubernetes Authors - tool: mdtoc - -artifacts: - - type: file - source: mdtoc-amd64-windows.exe - license: Apache-2.0 - gomodules: true - - - type: file - source: mdtoc-arm64-windows.exe - license: Apache-2.0 - gomodules: true - - - type: file - source: mdtoc-amd64-darwin - license: Apache-2.0 - gomodules: true - - - type: file - source: mdtoc-amd64-linux - license: Apache-2.0 - gomodules: true - - - type: file - source: mdtoc-arm-linux - license: Apache-2.0 - gomodules: true - - - type: file - source: mdtoc-arm64-darwin - license: Apache-2.0 - gomodules: true - - - type: file - source: mdtoc-arm64-linux - license: Apache-2.0 - gomodules: true diff --git a/vendor/sigs.k8s.io/mdtoc/CONTRIBUTING.md b/vendor/sigs.k8s.io/mdtoc/CONTRIBUTING.md deleted file mode 100644 index 93d6656efe..0000000000 --- a/vendor/sigs.k8s.io/mdtoc/CONTRIBUTING.md +++ /dev/null @@ -1,48 +0,0 @@ -# Contributing Guidelines - -Welcome to Kubernetes. We are excited about the prospect of you joining our -[community](https://git.k8s.io/community)! The Kubernetes community abides by -the CNCF [code of conduct](code-of-conduct.md). 
Here is an excerpt: - -_As contributors and maintainers of this project, and in the interest of -fostering an open and welcoming community, we pledge to respect all people who -contribute through reporting issues, posting feature requests, updating -documentation, submitting pull requests or patches, and other activities._ - -## Getting Started - -We have full documentation on how to get started contributing here: - - - -- [Contributor License Agreement](https://git.k8s.io/community/CLA.md) - Kubernetes projects require that you sign a Contributor License Agreement - (CLA) before we can accept your pull requests -- [Kubernetes Contributor - Guide](https://git.k8s.io/community/contributors/guide) - Main contributor - documentation, or you can just jump directly to the [contributing - section](https://git.k8s.io/community/contributors/guide#contributing) -- [Contributor Cheat - Sheet](https://git.k8s.io/community/contributors/guide/contributor-cheatsheet) - - Common resources for existing developers - -## Mentorship - -- [Mentoring Initiatives](https://git.k8s.io/community/mentoring) - We have a - diverse set of mentorship programs available that are always looking for - volunteers! - - diff --git a/vendor/sigs.k8s.io/mdtoc/LICENSE b/vendor/sigs.k8s.io/mdtoc/LICENSE deleted file mode 100644 index d645695673..0000000000 --- a/vendor/sigs.k8s.io/mdtoc/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. 
For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/sigs.k8s.io/mdtoc/Makefile b/vendor/sigs.k8s.io/mdtoc/Makefile deleted file mode 100644 index 22ba216840..0000000000 --- a/vendor/sigs.k8s.io/mdtoc/Makefile +++ /dev/null @@ -1,119 +0,0 @@ -# Copyright 2020 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-
-# If you update this file, please follow
-# https://suva.sh/posts/well-documented-makefiles
-
-.DEFAULT_GOAL:=help
-SHELL:=/usr/bin/env bash
-
-COLOR:=\\033[36m
-NOCOLOR:=\\033[0m
-
-# Set version variables for LDFLAGS
-GIT_VERSION ?= $(shell git describe --tags --always --dirty)
-GIT_HASH ?= $(shell git rev-parse HEAD)
-DATE_FMT = +%Y-%m-%dT%H:%M:%SZ
-SOURCE_DATE_EPOCH ?= $(shell git log -1 --pretty=%ct)
-ifdef SOURCE_DATE_EPOCH
-    BUILD_DATE ?= $(shell date -u -d "@$(SOURCE_DATE_EPOCH)" "$(DATE_FMT)" 2>/dev/null || date -u -r "$(SOURCE_DATE_EPOCH)" "$(DATE_FMT)" 2>/dev/null || date -u "$(DATE_FMT)")
-else
-    BUILD_DATE ?= $(shell date "$(DATE_FMT)")
-endif
-GIT_TREESTATE = "clean"
-DIFF = $(shell git diff --quiet >/dev/null 2>&1; if [ $$? -eq 1 ]; then echo "1"; fi)
-ifeq ($(DIFF), 1)
-    GIT_TREESTATE = "dirty"
-endif
-
-LDFLAGS=-buildid= -X sigs.k8s.io/release-utils/version.gitVersion=$(GIT_VERSION) \
-        -X sigs.k8s.io/release-utils/version.gitCommit=$(GIT_HASH) \
-        -X sigs.k8s.io/release-utils/version.gitTreeState=$(GIT_TREESTATE) \
-        -X sigs.k8s.io/release-utils/version.buildDate=$(BUILD_DATE)
-
-
-##@ Build
-
-build: ## Build mdtoc
-	# build local version
-	go build -trimpath -ldflags "$(LDFLAGS)" -o ./output/mdtoc .
-
-##@ Verify
-
-.PHONY: verify verify-boilerplate verify-dependencies verify-go-mod verify-golangci-lint
-
-verify: verify-boilerplate verify-dependencies verify-go-mod verify-golangci-lint ## Runs verification scripts to ensure correct execution
-
-verify-boilerplate: ## Runs the file header check
-	./hack/verify-boilerplate.sh
-
-verify-go-mod: ## Runs the go module linter
-	./hack/verify-go-mod.sh
-
-verify-golangci-lint: ## Runs all golang linters
-	./hack/verify-golangci-lint.sh
-
-##@ Tests
-
-.PHONY: test
-test: ## Runs unit tests to ensure correct execution
-	./hack/test-go.sh
-
-##@ Dependencies
-
-.SILENT: update-deps update-deps-go
-.PHONY: update-deps update-deps-go
-
-update-deps: update-deps-go ## Update all dependencies for this repo
-	echo -e "${COLOR}Commit/PR the following changes:${NOCOLOR}"
-	git status --short
-
-update-deps-go: GO111MODULE=on
-update-deps-go: ## Update all golang dependencies for this repo
-	go get -u -t ./...
-	go mod tidy
-	go mod verify
-	$(MAKE) test
-
-##@ Release
-
-.PHONY: goreleaser
-goreleaser: ## Build mdtoc binaries with goreleaser
-	LDFLAGS="$(LDFLAGS)" GIT_HASH=$(GIT_HASH) GIT_VERSION=$(GIT_VERSION) \
-	goreleaser release --clean
-
-.PHONY: snapshot
-snapshot: ## Build mdtoc binaries with goreleaser in snapshot mode
-	LDFLAGS="$(LDFLAGS)" GIT_HASH=$(GIT_HASH) GIT_VERSION=$(GIT_VERSION) \
-	goreleaser release --clean --snapshot --skip=sign,publish
-
-##@ Helpers
-
-.PHONY: help
-
-help: ## Display this help
-	@awk \
-		-v "col=${COLOR}" -v "nocol=${NOCOLOR}" \
-		' \
-		BEGIN { \
-			FS = ":.*##" ; \
-			printf "\nUsage:\n  make %s<target>%s\n", col, nocol \
-		} \
-		/^[a-zA-Z_-]+:.*?##/ { \
-			printf "  %s%-15s%s %s\n", col, $$1, nocol, $$2 \
-		} \
-		/^##@/ { \
-			printf "\n%s%s%s\n", col, substr($$0, 5), nocol \
-		} \
-		' $(MAKEFILE_LIST)
diff --git a/vendor/sigs.k8s.io/mdtoc/OWNERS b/vendor/sigs.k8s.io/mdtoc/OWNERS
deleted file mode 100644
index 4531871b9e..0000000000
--- a/vendor/sigs.k8s.io/mdtoc/OWNERS
+++ /dev/null
@@ -1,6 +0,0 @@
-# See the OWNERS docs at https://go.k8s.io/owners
-
-approvers:
-  - tallclair
-  - kubernetes/enhancements-admins
-  - sig-release-leads
diff --git a/vendor/sigs.k8s.io/mdtoc/OWNERS_ALIASES b/vendor/sigs.k8s.io/mdtoc/OWNERS_ALIASES
deleted file mode 100644
index 0dd2780475..0000000000
--- a/vendor/sigs.k8s.io/mdtoc/OWNERS_ALIASES
+++ /dev/null
@@ -1,15 +0,0 @@
-# See the OWNERS docs at https://go.k8s.io/owners#owners_aliases
-
-aliases:
-  kubernetes/enhancements-admins:
-    - justaugustus
-    - mrbobbytables
-    - johnbelamaric
-    - jeremyrickard
-  sig-release-leads:
-    - cpanato # SIG Technical Lead
-    - jeremyrickard # SIG Chair
-    - justaugustus # SIG Chair
-    - puerco # SIG Technical Lead
-    - saschagrunert # SIG Chair
-    - Verolop # SIG Technical Lead
diff --git a/vendor/sigs.k8s.io/mdtoc/README.md b/vendor/sigs.k8s.io/mdtoc/README.md
deleted file mode 100644
index 16524c47cf..0000000000
--- a/vendor/sigs.k8s.io/mdtoc/README.md
+++ /dev/null
@@ -1,84 +0,0 @@
-# Markdown Table of Contents Generator
-
-`mdtoc` is a utility for generating a table-of-contents for markdown files.
-
-Only github-flavored markdown is currently supported, but I am open to accepting patches to add
-other formats.
-
-# Table of Contents
-
-Generated with `mdtoc --inplace README.md`
-
-<!-- toc -->
-- [Usage](#usage)
-- [Installation](#installation)
-- [Community, discussion, contribution, and support](#community-discussion-contribution-and-support)
-  - [Code of conduct](#code-of-conduct)
-<!-- /toc -->
-
-## Usage
-
-Usage: `mdtoc [OPTIONS] [FILE]...`
-Generate a table of contents for a markdown file (github flavor).
-
-TOC may be wrapped in a pair of tags to allow in-place updates:
-```
-<!-- toc -->
-generated TOC goes here
-<!-- /toc -->
-```
-
-TOC indentation is normalized, so the shallowest header has indentation 0.
-
-**Options:**
-
-`--dryrun` - Whether to check for changes to TOC, rather than overwriting.
-Requires `--inplace` flag. Exit code 1 if there are changes.
-
-`--inplace` - Whether to edit the file in-place, or output to STDOUT. Requires
-toc tags to be present.
-
-`--skip-prefix` - Whether to ignore any headers before the opening toc
-tag.
(default true) - -For example, with `--skip-prefix=false` the TOC for this file becomes: - -``` -- [Markdown Table of Contents Generator](#markdown-table-of-contents-generator) -- [Table of Contents](#table-of-contents) - - [Usage](#usage) - - [Installation](#installation) -``` - -## Installation - -On linux, simply download and run the [standalone release -binary](https://github.com/kubernetes-sigs/mdtoc/releases) - -```sh -# Optional: Verify the file integrity - check the release notes for the expected value. -$ sha256sum $BINARY -$ chmod +x $BINARY -``` - -Or, if you have a go development environment set up: - -``` -go get sigs.k8s.io/mdtoc -``` - -## Community, discussion, contribution, and support - -Learn how to engage with the Kubernetes community on the [community page](http://kubernetes.io/community/). - -You can reach the maintainers of this project at: - -- [Slack](http://slack.k8s.io/) -- [Mailing List](https://groups.google.com/forum/#!forum/kubernetes-dev) - -### Code of conduct - -Participation in the Kubernetes community is governed by the [Kubernetes Code of Conduct](code-of-conduct.md). - -[owners]: https://git.k8s.io/community/contributors/guide/owners.md -[Creative Commons 4.0]: https://git.k8s.io/website/LICENSE diff --git a/vendor/sigs.k8s.io/mdtoc/SECURITY_CONTACTS b/vendor/sigs.k8s.io/mdtoc/SECURITY_CONTACTS deleted file mode 100644 index 5d4624308e..0000000000 --- a/vendor/sigs.k8s.io/mdtoc/SECURITY_CONTACTS +++ /dev/null @@ -1,14 +0,0 @@ -# Defined below are the security contacts for this repo. -# -# They are the contact point for the Product Security Committee to reach out -# to for triaging and handling of incoming issues. -# -# The below names agree to abide by the -# [Embargo Policy](https://git.k8s.io/security/private-distributors-list.md#embargo-policy) -# and will be removed and replaced if they violate that agreement. -# -# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE -# INSTRUCTIONS AT https://kubernetes.io/security/ - -tallclair -kubernetes/enhancements-admins diff --git a/vendor/sigs.k8s.io/mdtoc/code-of-conduct.md b/vendor/sigs.k8s.io/mdtoc/code-of-conduct.md deleted file mode 100644 index 0d15c00cf3..0000000000 --- a/vendor/sigs.k8s.io/mdtoc/code-of-conduct.md +++ /dev/null @@ -1,3 +0,0 @@ -# Kubernetes Community Code of Conduct - -Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md) diff --git a/vendor/sigs.k8s.io/mdtoc/mdtoc.go b/vendor/sigs.k8s.io/mdtoc/mdtoc.go deleted file mode 100644 index fce25b5d68..0000000000 --- a/vendor/sigs.k8s.io/mdtoc/mdtoc.go +++ /dev/null @@ -1,104 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/
-
-package main
-
-import (
-	"errors"
-	"flag"
-	"fmt"
-	"log"
-	"os"
-
-	"sigs.k8s.io/mdtoc/pkg/mdtoc"
-	"sigs.k8s.io/release-utils/version"
-)
-
-type utilityOptions struct {
-	mdtoc.Options
-	Inplace bool
-}
-
-var defaultOptions utilityOptions
-
-func init() {
-	flag.BoolVar(&defaultOptions.Dryrun, "dryrun", false, "Whether to check for changes to TOC, rather than overwriting. Requires --inplace flag.")
-	flag.BoolVar(&defaultOptions.Inplace, "inplace", false, "Whether to edit the file in-place, or output to STDOUT. Requires toc tags to be present.")
-	flag.BoolVar(&defaultOptions.SkipPrefix, "skip-prefix", true, "Whether to ignore any headers before the opening toc tag.")
-	flag.IntVar(&defaultOptions.MaxDepth, "max-depth", mdtoc.MaxHeaderDepth, "Limit the depth of headers that will be included in the TOC.")
-	flag.BoolVar(&defaultOptions.Version, "version", false, "Show MDTOC version.")
-
-	flag.Usage = func() {
-		fmt.Fprintf(flag.CommandLine.Output(), "Usage: %s [OPTIONS] [FILE]...\n", os.Args[0])
-		fmt.Fprintf(flag.CommandLine.Output(), "Generate a table of contents for a markdown file (github flavor).\n")
-		fmt.Fprintf(flag.CommandLine.Output(), "TOC may be wrapped in a pair of tags to allow in-place updates: <!-- toc --><!-- /toc -->\n")
-		flag.PrintDefaults()
-	}
-}
-
-func main() {
-	flag.Parse()
-
-	if defaultOptions.Version {
-		v := version.GetVersionInfo()
-		v.Name = "mdtoc"
-		v.Description = "is a utility for generating a table-of-contents for markdown files"
-		v.ASCIIName = "true"
-		v.FontName = "banner"
-		fmt.Fprintln(os.Stdout, v.String())
-		os.Exit(0)
-	}
-
-	if err := validateArgs(defaultOptions, flag.Args()); err != nil {
-		fmt.Fprintf(os.Stderr, "Error: %v\n", err)
-		flag.Usage()
-		os.Exit(1)
-	}
-
-	switch defaultOptions.Inplace {
-	case true:
-		hadError := false
-		for _, file := range flag.Args() {
-			err := mdtoc.WriteTOC(file, defaultOptions.Options)
-			if err != nil {
-				log.Printf("%s: %v", file, err)
-				hadError = true
-			}
-		}
-		if hadError {
-			os.Exit(1)
-		}
-	case false:
-		toc, err := mdtoc.GetTOC(flag.Args()[0], defaultOptions.Options)
-		if err != nil {
-			os.Exit(1)
-		}
-		fmt.Println(toc)
-	}
-}
-
-func validateArgs(opts utilityOptions, args []string) error {
-	if len(args) < 1 {
-		return errors.New("must specify at least 1 file")
-	}
-	if !opts.Inplace && len(args) > 1 {
-		return errors.New("non-inplace updates require exactly 1 file")
-	}
-	if opts.Dryrun && !opts.Inplace {
-		return errors.New("--dryrun requires --inplace")
-	}
-	return nil
-}
diff --git a/vendor/sigs.k8s.io/mdtoc/pkg/mdtoc/mdtoc.go b/vendor/sigs.k8s.io/mdtoc/pkg/mdtoc/mdtoc.go
deleted file mode 100644
index ccf648adcd..0000000000
--- a/vendor/sigs.k8s.io/mdtoc/pkg/mdtoc/mdtoc.go
+++ /dev/null
@@ -1,285 +0,0 @@
-/*
-Copyright 2020 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package mdtoc
-
-import (
-	"bytes"
-	"errors"
-	"fmt"
-	"math"
-	"os"
-	"regexp"
-	"strings"
-
-	"github.com/gomarkdown/markdown/ast"
-	"github.com/gomarkdown/markdown/html"
-	"github.com/gomarkdown/markdown/parser"
-	"github.com/mmarkdown/mmark/mparser"
-)
-
-const (
-	// StartTOC is the opening tag for the table of contents.
-	StartTOC = "<!-- toc -->"
-	// EndTOC is the tag that marks the end of the TOC.
-	EndTOC = "<!-- /toc -->"
-	// MaxHeaderDepth is the default maximum header depth for ToC generation.
-	MaxHeaderDepth = 6
-)
-
-var (
-	startTOCRegex = regexp.MustCompile("(?i)" + StartTOC)
-	endTOCRegex   = regexp.MustCompile("(?i)" + EndTOC)
-)
-
-// Options set for the toc generator.
-type Options struct {
-	Dryrun     bool
-	SkipPrefix bool
-	Version    bool
-	MaxDepth   int
-}
-
-// parse parses a raw markdown document to an AST.
-func parse(b []byte) ast.Node {
-	p := parser.NewWithExtensions(parser.CommonExtensions)
-	p.Opts = parser.Options{
-		// mparser is required for parsing the --- title blocks
-		ParserHook: mparser.Hook,
-	}
-	return p.Parse(b)
-}
-
-// GenerateTOC parses a document and returns its TOC.
-func GenerateTOC(doc []byte, opts Options) (string, error) {
-	anchors := make(anchorGen)
-
-	md := parse(doc)
-
-	baseLvl := headingBase(md)
-	toc := &bytes.Buffer{}
-	htmlRenderer := html.NewRenderer(html.RendererOptions{})
-	walkHeadings(md, func(heading *ast.Heading) {
-		if opts.MaxDepth > 0 && heading.Level > opts.MaxDepth {
-			return
-		}
-		anchor := anchors.mkAnchor(asText(heading))
-		content := headingBody(htmlRenderer, heading)
-		fmt.Fprintf(toc, "%s- [%s](#%s)\n", strings.Repeat("  ", heading.Level-baseLvl), content, anchor)
-	})
-
-	return toc.String(), nil
-}
-
-type headingFn func(heading *ast.Heading)
-
-// walkHeadings runs the heading function on each heading in the parsed markdown document.
-func walkHeadings(doc ast.Node, headingFn headingFn) {
-	ast.WalkFunc(doc, func(node ast.Node, entering bool) ast.WalkStatus {
-		if !entering {
-			return ast.GoToNext // Don't care about closing the heading section.
-		}
-
-		heading, ok := node.(*ast.Heading)
-		if !ok {
-			return ast.GoToNext // Ignore non-heading nodes.
-		}
-
-		if heading.IsTitleblock {
-			return ast.GoToNext // Ignore title blocks (the --- section)
-		}
-
-		headingFn(heading)
-
-		return ast.GoToNext
-	})
-}
-
-// anchorGen is used to generate heading anchor IDs, using the github-flavored markdown syntax.
-type anchorGen map[string]int
-
-func (a anchorGen) mkAnchor(text string) string {
-	text = strings.ToLower(text)
-	text = punctuation.ReplaceAllString(text, "")
-	text = strings.ReplaceAll(text, " ", "-")
-	idx := a[text]
-	a[text] = idx + 1
-	if idx > 0 {
-		return fmt.Sprintf("%s-%d", text, idx)
-	}
-	return text
-}
-
-// Locate the case-insensitive TOC tags.
-func findTOCTags(raw []byte) (start, end int) {
-	if ind := startTOCRegex.FindIndex(raw); len(ind) > 0 {
-		start = ind[0]
-	} else {
-		start = -1
-	}
-	if ind := endTOCRegex.FindIndex(raw); len(ind) > 0 {
-		end = ind[0]
-	} else {
-		end = -1
-	}
-	return
-}
-
-func asText(node ast.Node) (text string) {
-	ast.WalkFunc(node, func(node ast.Node, entering bool) ast.WalkStatus {
-		if !entering {
-			return ast.GoToNext // Don't care about closing the heading section.
-		}
-
-		switch node.(type) {
-		case *ast.Text, *ast.Code:
-			text += string(node.AsLeaf().Literal)
-		}
-
-		return ast.GoToNext
-	})
-	return text
-}
-
-// Renders the heading body as HTML.
-func headingBody(renderer *html.Renderer, heading *ast.Heading) string {
-	var buf bytes.Buffer
-	for _, child := range heading.Children {
-		ast.WalkFunc(child, func(node ast.Node, entering bool) ast.WalkStatus {
-			return renderer.RenderNode(&buf, node, entering)
-		})
-	}
-	return strings.TrimSpace(buf.String())
-}
-
-// headingBase finds the minimum heading level. This is useful for normalizing indentation, such as
-// when a top-level heading is skipped in the prefix.
-func headingBase(doc ast.Node) int {
-	baseLvl := math.MaxInt32
-	walkHeadings(doc, func(heading *ast.Heading) {
-		if baseLvl > heading.Level {
-			baseLvl = heading.Level
-		}
-	})
-
-	return baseLvl
-}
-
-// Match punctuation that is filtered out from anchor IDs.
-var punctuation = regexp.MustCompile(`[^\w\- ]`)
-
-// WriteTOC generates the TOC for the file and writes it back in place,
-// using the given options. It returns an error, if any.
-func WriteTOC(file string, opts Options) error {
-	raw, err := os.ReadFile(file)
-	if err != nil {
-		return fmt.Errorf("unable to read %s: %w", file, err)
-	}
-
-	start, end := findTOCTags(raw)
-
-	if start == -1 {
-		return errors.New("missing opening TOC tag")
-	}
-	if end == -1 {
-		return errors.New("missing closing TOC tag")
-	}
-	if end < start {
-		return errors.New("TOC closing tag before start tag")
-	}
-
-	var doc []byte
-	doc = raw
-	// skipPrefix is only used when toc tags are present.
-	if opts.SkipPrefix && start != -1 && end != -1 {
-		doc = raw[end:]
-	}
-	toc, err := GenerateTOC(doc, opts)
-	if err != nil {
-		return fmt.Errorf("failed to generate toc: %w", err)
-	}
-
-	realStart := start + len(StartTOC)
-	oldTOC := string(raw[realStart:end])
-	if strings.TrimSpace(oldTOC) == strings.TrimSpace(toc) {
-		// No changes required.
-		return nil
-	} else if opts.Dryrun {
-		return fmt.Errorf("changes found:\n%s", toc)
-	}
-
-	err = atomicWrite(file,
-		string(raw[:realStart])+"\n",
-		toc,
-		string(raw[end:]),
-	)
-	return err
-}
-
-// GetTOC generates the TOC from a file with options.
-// Returns the generated toc, and any error.
-func GetTOC(file string, opts Options) (string, error) {
-	doc, err := os.ReadFile(file)
-	if err != nil {
-		return "", fmt.Errorf("unable to read %s: %w", file, err)
-	}
-
-	start, end := findTOCTags(doc)
-	startPos := 0
-
-	// skipPrefix is only used when toc tags are present.
-	if opts.SkipPrefix && start != -1 && end != -1 {
-		startPos = end
-	}
-	toc, err := GenerateTOC(doc[startPos:], opts)
-	if err != nil {
-		return toc, fmt.Errorf("failed to generate toc: %w", err)
-	}
-
-	return toc, err
-}
-
-// atomicWrite writes the chunks sequentially to the filePath.
-// A temporary file is used so no changes are made to the original in the case of an error.
-func atomicWrite(filePath string, chunks ...string) error {
-	tmpPath := filePath + "_tmp"
-	tmp, err := os.OpenFile(tmpPath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0o600)
-	if err != nil {
-		return fmt.Errorf("unable to open temporary file %s: %w", tmpPath, err)
-	}
-
-	// Cleanup
-	defer func() {
-		tmp.Close()
-		os.Remove(tmpPath)
-	}()
-
-	for _, chunk := range chunks {
-		if _, err := tmp.WriteString(chunk); err != nil {
-			return fmt.Errorf("write temp string: %w", err)
-		}
-	}
-
-	if err := tmp.Close(); err != nil {
-		return fmt.Errorf("close temp file: %w", err)
-	}
-
-	if err := os.Rename(tmp.Name(), filePath); err != nil {
-		return fmt.Errorf("rename temp file: %w", err)
-	}
-
-	return nil
-}
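The deleted `pkg/mdtoc` files above also document the library's exported surface (`Options`, `GetTOC`, `WriteTOC`, `MaxHeaderDepth`). For anyone who was consuming the TOC generator through Go code rather than the CLI, here is a minimal sketch of driving that API directly; it assumes the module is now resolved through the Go module proxy instead of `vendor/`, and `README.md` is only a placeholder input path:

```go
package main

import (
	"fmt"
	"log"

	"sigs.k8s.io/mdtoc/pkg/mdtoc"
)

func main() {
	opts := mdtoc.Options{
		SkipPrefix: true,                 // ignore headings before the opening <!-- toc --> tag
		MaxDepth:   mdtoc.MaxHeaderDepth, // include headings up to the default depth of 6
	}

	// Print the generated TOC to stdout, mirroring `mdtoc FILE`.
	toc, err := mdtoc.GetTOC("README.md", opts)
	if err != nil {
		log.Fatalf("generate TOC: %v", err)
	}
	fmt.Print(toc)

	// Rewrite the section between the case-insensitive <!-- toc --> and
	// <!-- /toc --> tags in place, mirroring `mdtoc --inplace FILE`.
	if err := mdtoc.WriteTOC("README.md", opts); err != nil {
		log.Fatalf("write TOC: %v", err)
	}
}
```

Note that `WriteTOC` replaces only the region between the TOC tags and goes through the temporary-file-plus-rename path in `atomicWrite`, so a failure partway through never corrupts the original document.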