From 1a57267a17c2fc17fb6e104846fabc3e363c326c Mon Sep 17 00:00:00 2001 From: Emile Date: Fri, 16 Aug 2024 19:50:26 +0200 Subject: initial commit --- vendor/github.com/dustin/go-humanize/.travis.yml | 21 + vendor/github.com/dustin/go-humanize/LICENSE | 21 + .../github.com/dustin/go-humanize/README.markdown | 124 ++++ vendor/github.com/dustin/go-humanize/big.go | 31 + vendor/github.com/dustin/go-humanize/bigbytes.go | 189 +++++ vendor/github.com/dustin/go-humanize/bytes.go | 143 ++++ vendor/github.com/dustin/go-humanize/comma.go | 116 +++ vendor/github.com/dustin/go-humanize/commaf.go | 41 ++ vendor/github.com/dustin/go-humanize/ftoa.go | 49 ++ vendor/github.com/dustin/go-humanize/humanize.go | 8 + vendor/github.com/dustin/go-humanize/number.go | 192 +++++ vendor/github.com/dustin/go-humanize/ordinals.go | 25 + vendor/github.com/dustin/go-humanize/si.go | 127 ++++ vendor/github.com/dustin/go-humanize/times.go | 117 +++ vendor/github.com/felixge/httpsnoop/.gitignore | 0 vendor/github.com/felixge/httpsnoop/.travis.yml | 6 + vendor/github.com/felixge/httpsnoop/LICENSE.txt | 19 + vendor/github.com/felixge/httpsnoop/Makefile | 10 + vendor/github.com/felixge/httpsnoop/README.md | 95 +++ .../felixge/httpsnoop/capture_metrics.go | 86 +++ vendor/github.com/felixge/httpsnoop/docs.go | 10 + .../felixge/httpsnoop/wrap_generated_gteq_1.8.go | 436 +++++++++++ .../felixge/httpsnoop/wrap_generated_lt_1.8.go | 278 +++++++ vendor/github.com/google/uuid/CHANGELOG.md | 41 ++ vendor/github.com/google/uuid/CONTRIBUTING.md | 26 + vendor/github.com/google/uuid/CONTRIBUTORS | 9 + vendor/github.com/google/uuid/LICENSE | 27 + vendor/github.com/google/uuid/README.md | 21 + vendor/github.com/google/uuid/dce.go | 80 ++ vendor/github.com/google/uuid/doc.go | 12 + vendor/github.com/google/uuid/hash.go | 59 ++ vendor/github.com/google/uuid/marshal.go | 38 + vendor/github.com/google/uuid/node.go | 90 +++ vendor/github.com/google/uuid/node_js.go | 12 + vendor/github.com/google/uuid/node_net.go | 33 + vendor/github.com/google/uuid/null.go | 118 +++ vendor/github.com/google/uuid/sql.go | 59 ++ vendor/github.com/google/uuid/time.go | 134 ++++ vendor/github.com/google/uuid/util.go | 43 ++ vendor/github.com/google/uuid/uuid.go | 365 +++++++++ vendor/github.com/google/uuid/version1.go | 44 ++ vendor/github.com/google/uuid/version4.go | 76 ++ vendor/github.com/google/uuid/version6.go | 56 ++ vendor/github.com/google/uuid/version7.go | 104 +++ vendor/github.com/gorilla/handlers/.editorconfig | 20 + vendor/github.com/gorilla/handlers/.gitignore | 2 + vendor/github.com/gorilla/handlers/LICENSE | 27 + vendor/github.com/gorilla/handlers/Makefile | 34 + vendor/github.com/gorilla/handlers/README.md | 56 ++ vendor/github.com/gorilla/handlers/canonical.go | 73 ++ vendor/github.com/gorilla/handlers/compress.go | 143 ++++ vendor/github.com/gorilla/handlers/cors.go | 352 +++++++++ vendor/github.com/gorilla/handlers/doc.go | 9 + vendor/github.com/gorilla/handlers/handlers.go | 150 ++++ vendor/github.com/gorilla/handlers/logging.go | 246 +++++++ .../github.com/gorilla/handlers/proxy_headers.go | 120 +++ vendor/github.com/gorilla/handlers/recovery.go | 98 +++ vendor/github.com/gorilla/mux/.editorconfig | 20 + vendor/github.com/gorilla/mux/.gitignore | 1 + vendor/github.com/gorilla/mux/LICENSE | 27 + vendor/github.com/gorilla/mux/Makefile | 34 + vendor/github.com/gorilla/mux/README.md | 812 +++++++++++++++++++++ vendor/github.com/gorilla/mux/doc.go | 305 ++++++++ vendor/github.com/gorilla/mux/middleware.go | 74 ++ 
vendor/github.com/gorilla/mux/mux.go | 608 +++++++++++++++ vendor/github.com/gorilla/mux/regexp.go | 388 ++++++++++ vendor/github.com/gorilla/mux/route.go | 765 +++++++++++++++++++ vendor/github.com/gorilla/mux/test_helpers.go | 19 + .../github.com/gorilla/securecookie/.editorconfig | 20 + vendor/github.com/gorilla/securecookie/.gitignore | 1 + vendor/github.com/gorilla/securecookie/LICENSE | 27 + vendor/github.com/gorilla/securecookie/Makefile | 39 + vendor/github.com/gorilla/securecookie/README.md | 144 ++++ vendor/github.com/gorilla/securecookie/doc.go | 61 ++ .../gorilla/securecookie/securecookie.go | 649 ++++++++++++++++ vendor/github.com/gorilla/sessions/.editorconfig | 20 + vendor/github.com/gorilla/sessions/.gitignore | 1 + vendor/github.com/gorilla/sessions/LICENSE | 27 + vendor/github.com/gorilla/sessions/Makefile | 34 + vendor/github.com/gorilla/sessions/README.md | 94 +++ vendor/github.com/gorilla/sessions/cookie.go | 20 + vendor/github.com/gorilla/sessions/cookie_go111.go | 21 + vendor/github.com/gorilla/sessions/doc.go | 207 ++++++ vendor/github.com/gorilla/sessions/lex.go | 102 +++ vendor/github.com/gorilla/sessions/options.go | 19 + .../github.com/gorilla/sessions/options_go111.go | 23 + vendor/github.com/gorilla/sessions/sessions.go | 218 ++++++ vendor/github.com/gorilla/sessions/store.go | 298 ++++++++ .../github.com/hashicorp/golang-lru/v2/.gitignore | 23 + .../hashicorp/golang-lru/v2/.golangci.yml | 46 ++ vendor/github.com/hashicorp/golang-lru/v2/2q.go | 267 +++++++ vendor/github.com/hashicorp/golang-lru/v2/LICENSE | 364 +++++++++ .../github.com/hashicorp/golang-lru/v2/README.md | 79 ++ vendor/github.com/hashicorp/golang-lru/v2/doc.go | 24 + .../hashicorp/golang-lru/v2/internal/list.go | 142 ++++ vendor/github.com/hashicorp/golang-lru/v2/lru.go | 250 +++++++ .../hashicorp/golang-lru/v2/simplelru/LICENSE_list | 29 + .../hashicorp/golang-lru/v2/simplelru/lru.go | 177 +++++ .../golang-lru/v2/simplelru/lru_interface.go | 46 ++ vendor/github.com/mattn/go-isatty/LICENSE | 9 + vendor/github.com/mattn/go-isatty/README.md | 50 ++ vendor/github.com/mattn/go-isatty/doc.go | 2 + vendor/github.com/mattn/go-isatty/go.test.sh | 12 + vendor/github.com/mattn/go-isatty/isatty_bsd.go | 20 + vendor/github.com/mattn/go-isatty/isatty_others.go | 17 + vendor/github.com/mattn/go-isatty/isatty_plan9.go | 23 + .../github.com/mattn/go-isatty/isatty_solaris.go | 21 + vendor/github.com/mattn/go-isatty/isatty_tcgets.go | 20 + .../github.com/mattn/go-isatty/isatty_windows.go | 125 ++++ vendor/github.com/ncruces/go-strftime/.gitignore | 15 + vendor/github.com/ncruces/go-strftime/LICENSE | 21 + vendor/github.com/ncruces/go-strftime/README.md | 5 + vendor/github.com/ncruces/go-strftime/parser.go | 107 +++ vendor/github.com/ncruces/go-strftime/pkg.go | 96 +++ .../github.com/ncruces/go-strftime/specifiers.go | 241 ++++++ vendor/github.com/ncruces/go-strftime/strftime.go | 324 ++++++++ vendor/github.com/radareorg/r2pipe-go/LICENSE | 23 + vendor/github.com/radareorg/r2pipe-go/Makefile | 12 + vendor/github.com/radareorg/r2pipe-go/README.md | 17 + vendor/github.com/radareorg/r2pipe-go/api.go | 47 ++ vendor/github.com/radareorg/r2pipe-go/native.go | 161 ++++ vendor/github.com/radareorg/r2pipe-go/r2pipe.go | 243 ++++++ vendor/github.com/remyoudompheng/bigfft/LICENSE | 27 + vendor/github.com/remyoudompheng/bigfft/README | 54 ++ .../github.com/remyoudompheng/bigfft/arith_decl.go | 33 + vendor/github.com/remyoudompheng/bigfft/fermat.go | 216 ++++++ vendor/github.com/remyoudompheng/bigfft/fft.go | 370 ++++++++++ 
vendor/github.com/remyoudompheng/bigfft/scan.go | 70 ++ 128 files changed, 13827 insertions(+) create mode 100644 vendor/github.com/dustin/go-humanize/.travis.yml create mode 100644 vendor/github.com/dustin/go-humanize/LICENSE create mode 100644 vendor/github.com/dustin/go-humanize/README.markdown create mode 100644 vendor/github.com/dustin/go-humanize/big.go create mode 100644 vendor/github.com/dustin/go-humanize/bigbytes.go create mode 100644 vendor/github.com/dustin/go-humanize/bytes.go create mode 100644 vendor/github.com/dustin/go-humanize/comma.go create mode 100644 vendor/github.com/dustin/go-humanize/commaf.go create mode 100644 vendor/github.com/dustin/go-humanize/ftoa.go create mode 100644 vendor/github.com/dustin/go-humanize/humanize.go create mode 100644 vendor/github.com/dustin/go-humanize/number.go create mode 100644 vendor/github.com/dustin/go-humanize/ordinals.go create mode 100644 vendor/github.com/dustin/go-humanize/si.go create mode 100644 vendor/github.com/dustin/go-humanize/times.go create mode 100644 vendor/github.com/felixge/httpsnoop/.gitignore create mode 100644 vendor/github.com/felixge/httpsnoop/.travis.yml create mode 100644 vendor/github.com/felixge/httpsnoop/LICENSE.txt create mode 100644 vendor/github.com/felixge/httpsnoop/Makefile create mode 100644 vendor/github.com/felixge/httpsnoop/README.md create mode 100644 vendor/github.com/felixge/httpsnoop/capture_metrics.go create mode 100644 vendor/github.com/felixge/httpsnoop/docs.go create mode 100644 vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go create mode 100644 vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go create mode 100644 vendor/github.com/google/uuid/CHANGELOG.md create mode 100644 vendor/github.com/google/uuid/CONTRIBUTING.md create mode 100644 vendor/github.com/google/uuid/CONTRIBUTORS create mode 100644 vendor/github.com/google/uuid/LICENSE create mode 100644 vendor/github.com/google/uuid/README.md create mode 100644 vendor/github.com/google/uuid/dce.go create mode 100644 vendor/github.com/google/uuid/doc.go create mode 100644 vendor/github.com/google/uuid/hash.go create mode 100644 vendor/github.com/google/uuid/marshal.go create mode 100644 vendor/github.com/google/uuid/node.go create mode 100644 vendor/github.com/google/uuid/node_js.go create mode 100644 vendor/github.com/google/uuid/node_net.go create mode 100644 vendor/github.com/google/uuid/null.go create mode 100644 vendor/github.com/google/uuid/sql.go create mode 100644 vendor/github.com/google/uuid/time.go create mode 100644 vendor/github.com/google/uuid/util.go create mode 100644 vendor/github.com/google/uuid/uuid.go create mode 100644 vendor/github.com/google/uuid/version1.go create mode 100644 vendor/github.com/google/uuid/version4.go create mode 100644 vendor/github.com/google/uuid/version6.go create mode 100644 vendor/github.com/google/uuid/version7.go create mode 100644 vendor/github.com/gorilla/handlers/.editorconfig create mode 100644 vendor/github.com/gorilla/handlers/.gitignore create mode 100644 vendor/github.com/gorilla/handlers/LICENSE create mode 100644 vendor/github.com/gorilla/handlers/Makefile create mode 100644 vendor/github.com/gorilla/handlers/README.md create mode 100644 vendor/github.com/gorilla/handlers/canonical.go create mode 100644 vendor/github.com/gorilla/handlers/compress.go create mode 100644 vendor/github.com/gorilla/handlers/cors.go create mode 100644 vendor/github.com/gorilla/handlers/doc.go create mode 100644 vendor/github.com/gorilla/handlers/handlers.go create mode 100644 
vendor/github.com/gorilla/handlers/logging.go create mode 100644 vendor/github.com/gorilla/handlers/proxy_headers.go create mode 100644 vendor/github.com/gorilla/handlers/recovery.go create mode 100644 vendor/github.com/gorilla/mux/.editorconfig create mode 100644 vendor/github.com/gorilla/mux/.gitignore create mode 100644 vendor/github.com/gorilla/mux/LICENSE create mode 100644 vendor/github.com/gorilla/mux/Makefile create mode 100644 vendor/github.com/gorilla/mux/README.md create mode 100644 vendor/github.com/gorilla/mux/doc.go create mode 100644 vendor/github.com/gorilla/mux/middleware.go create mode 100644 vendor/github.com/gorilla/mux/mux.go create mode 100644 vendor/github.com/gorilla/mux/regexp.go create mode 100644 vendor/github.com/gorilla/mux/route.go create mode 100644 vendor/github.com/gorilla/mux/test_helpers.go create mode 100644 vendor/github.com/gorilla/securecookie/.editorconfig create mode 100644 vendor/github.com/gorilla/securecookie/.gitignore create mode 100644 vendor/github.com/gorilla/securecookie/LICENSE create mode 100644 vendor/github.com/gorilla/securecookie/Makefile create mode 100644 vendor/github.com/gorilla/securecookie/README.md create mode 100644 vendor/github.com/gorilla/securecookie/doc.go create mode 100644 vendor/github.com/gorilla/securecookie/securecookie.go create mode 100644 vendor/github.com/gorilla/sessions/.editorconfig create mode 100644 vendor/github.com/gorilla/sessions/.gitignore create mode 100644 vendor/github.com/gorilla/sessions/LICENSE create mode 100644 vendor/github.com/gorilla/sessions/Makefile create mode 100644 vendor/github.com/gorilla/sessions/README.md create mode 100644 vendor/github.com/gorilla/sessions/cookie.go create mode 100644 vendor/github.com/gorilla/sessions/cookie_go111.go create mode 100644 vendor/github.com/gorilla/sessions/doc.go create mode 100644 vendor/github.com/gorilla/sessions/lex.go create mode 100644 vendor/github.com/gorilla/sessions/options.go create mode 100644 vendor/github.com/gorilla/sessions/options_go111.go create mode 100644 vendor/github.com/gorilla/sessions/sessions.go create mode 100644 vendor/github.com/gorilla/sessions/store.go create mode 100644 vendor/github.com/hashicorp/golang-lru/v2/.gitignore create mode 100644 vendor/github.com/hashicorp/golang-lru/v2/.golangci.yml create mode 100644 vendor/github.com/hashicorp/golang-lru/v2/2q.go create mode 100644 vendor/github.com/hashicorp/golang-lru/v2/LICENSE create mode 100644 vendor/github.com/hashicorp/golang-lru/v2/README.md create mode 100644 vendor/github.com/hashicorp/golang-lru/v2/doc.go create mode 100644 vendor/github.com/hashicorp/golang-lru/v2/internal/list.go create mode 100644 vendor/github.com/hashicorp/golang-lru/v2/lru.go create mode 100644 vendor/github.com/hashicorp/golang-lru/v2/simplelru/LICENSE_list create mode 100644 vendor/github.com/hashicorp/golang-lru/v2/simplelru/lru.go create mode 100644 vendor/github.com/hashicorp/golang-lru/v2/simplelru/lru_interface.go create mode 100644 vendor/github.com/mattn/go-isatty/LICENSE create mode 100644 vendor/github.com/mattn/go-isatty/README.md create mode 100644 vendor/github.com/mattn/go-isatty/doc.go create mode 100644 vendor/github.com/mattn/go-isatty/go.test.sh create mode 100644 vendor/github.com/mattn/go-isatty/isatty_bsd.go create mode 100644 vendor/github.com/mattn/go-isatty/isatty_others.go create mode 100644 vendor/github.com/mattn/go-isatty/isatty_plan9.go create mode 100644 vendor/github.com/mattn/go-isatty/isatty_solaris.go create mode 100644 
vendor/github.com/mattn/go-isatty/isatty_tcgets.go create mode 100644 vendor/github.com/mattn/go-isatty/isatty_windows.go create mode 100644 vendor/github.com/ncruces/go-strftime/.gitignore create mode 100644 vendor/github.com/ncruces/go-strftime/LICENSE create mode 100644 vendor/github.com/ncruces/go-strftime/README.md create mode 100644 vendor/github.com/ncruces/go-strftime/parser.go create mode 100644 vendor/github.com/ncruces/go-strftime/pkg.go create mode 100644 vendor/github.com/ncruces/go-strftime/specifiers.go create mode 100644 vendor/github.com/ncruces/go-strftime/strftime.go create mode 100644 vendor/github.com/radareorg/r2pipe-go/LICENSE create mode 100644 vendor/github.com/radareorg/r2pipe-go/Makefile create mode 100644 vendor/github.com/radareorg/r2pipe-go/README.md create mode 100644 vendor/github.com/radareorg/r2pipe-go/api.go create mode 100644 vendor/github.com/radareorg/r2pipe-go/native.go create mode 100644 vendor/github.com/radareorg/r2pipe-go/r2pipe.go create mode 100644 vendor/github.com/remyoudompheng/bigfft/LICENSE create mode 100644 vendor/github.com/remyoudompheng/bigfft/README create mode 100644 vendor/github.com/remyoudompheng/bigfft/arith_decl.go create mode 100644 vendor/github.com/remyoudompheng/bigfft/fermat.go create mode 100644 vendor/github.com/remyoudompheng/bigfft/fft.go create mode 100644 vendor/github.com/remyoudompheng/bigfft/scan.go (limited to 'vendor/github.com') diff --git a/vendor/github.com/dustin/go-humanize/.travis.yml b/vendor/github.com/dustin/go-humanize/.travis.yml new file mode 100644 index 0000000..ac12e48 --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/.travis.yml @@ -0,0 +1,21 @@ +sudo: false +language: go +go_import_path: github.com/dustin/go-humanize +go: + - 1.13.x + - 1.14.x + - 1.15.x + - 1.16.x + - stable + - master +matrix: + allow_failures: + - go: master + fast_finish: true +install: + - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step). +script: + - diff -u <(echo -n) <(gofmt -d -s .) + - go vet . + - go install -v -race ./... + - go test -v -race ./... diff --git a/vendor/github.com/dustin/go-humanize/LICENSE b/vendor/github.com/dustin/go-humanize/LICENSE new file mode 100644 index 0000000..8d9a94a --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/LICENSE @@ -0,0 +1,21 @@ +Copyright (c) 2005-2008 Dustin Sallings + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+ + diff --git a/vendor/github.com/dustin/go-humanize/README.markdown b/vendor/github.com/dustin/go-humanize/README.markdown new file mode 100644 index 0000000..7d0b16b --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/README.markdown @@ -0,0 +1,124 @@ +# Humane Units [![Build Status](https://travis-ci.org/dustin/go-humanize.svg?branch=master)](https://travis-ci.org/dustin/go-humanize) [![GoDoc](https://godoc.org/github.com/dustin/go-humanize?status.svg)](https://godoc.org/github.com/dustin/go-humanize) + +Just a few functions for helping humanize times and sizes. + +`go get` it as `github.com/dustin/go-humanize`, import it as +`"github.com/dustin/go-humanize"`, use it as `humanize`. + +See [godoc](https://pkg.go.dev/github.com/dustin/go-humanize) for +complete documentation. + +## Sizes + +This lets you take numbers like `82854982` and convert them to useful +strings like, `83 MB` or `79 MiB` (whichever you prefer). + +Example: + +```go +fmt.Printf("That file is %s.", humanize.Bytes(82854982)) // That file is 83 MB. +``` + +## Times + +This lets you take a `time.Time` and spit it out in relative terms. +For example, `12 seconds ago` or `3 days from now`. + +Example: + +```go +fmt.Printf("This was touched %s.", humanize.Time(someTimeInstance)) // This was touched 7 hours ago. +``` + +Thanks to Kyle Lemons for the time implementation from an IRC +conversation one day. It's pretty neat. + +## Ordinals + +From a [mailing list discussion][odisc] where a user wanted to be able +to label ordinals. + + 0 -> 0th + 1 -> 1st + 2 -> 2nd + 3 -> 3rd + 4 -> 4th + [...] + +Example: + +```go +fmt.Printf("You're my %s best friend.", humanize.Ordinal(193)) // You are my 193rd best friend. +``` + +## Commas + +Want to shove commas into numbers? Be my guest. + + 0 -> 0 + 100 -> 100 + 1000 -> 1,000 + 1000000000 -> 1,000,000,000 + -100000 -> -100,000 + +Example: + +```go +fmt.Printf("You owe $%s.\n", humanize.Comma(6582491)) // You owe $6,582,491. +``` + +## Ftoa + +Nicer float64 formatter that removes trailing zeros. + +```go +fmt.Printf("%f", 2.24) // 2.240000 +fmt.Printf("%s", humanize.Ftoa(2.24)) // 2.24 +fmt.Printf("%f", 2.0) // 2.000000 +fmt.Printf("%s", humanize.Ftoa(2.0)) // 2 +``` + +## SI notation + +Format numbers with [SI notation][sinotation]. + +Example: + +```go +humanize.SI(0.00000000223, "M") // 2.23 nM +``` + +## English-specific functions + +The following functions are in the `humanize/english` subpackage. 
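+
+Editorial aside, not part of the upstream README: the `english` helpers live in their own subpackage and are imported separately from the core `humanize` package (note that no `english/` files appear in the vendored file list above, so the examples below refer to the upstream module). A minimal import sketch:
+
+```go
+import (
+	humanize "github.com/dustin/go-humanize"
+	"github.com/dustin/go-humanize/english"
+)
+```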
+ +### Plurals + +Simple English pluralization + +```go +english.PluralWord(1, "object", "") // object +english.PluralWord(42, "object", "") // objects +english.PluralWord(2, "bus", "") // buses +english.PluralWord(99, "locus", "loci") // loci + +english.Plural(1, "object", "") // 1 object +english.Plural(42, "object", "") // 42 objects +english.Plural(2, "bus", "") // 2 buses +english.Plural(99, "locus", "loci") // 99 loci +``` + +### Word series + +Format comma-separated words lists with conjuctions: + +```go +english.WordSeries([]string{"foo"}, "and") // foo +english.WordSeries([]string{"foo", "bar"}, "and") // foo and bar +english.WordSeries([]string{"foo", "bar", "baz"}, "and") // foo, bar and baz + +english.OxfordWordSeries([]string{"foo", "bar", "baz"}, "and") // foo, bar, and baz +``` + +[odisc]: https://groups.google.com/d/topic/golang-nuts/l8NhI74jl-4/discussion +[sinotation]: http://en.wikipedia.org/wiki/Metric_prefix diff --git a/vendor/github.com/dustin/go-humanize/big.go b/vendor/github.com/dustin/go-humanize/big.go new file mode 100644 index 0000000..f49dc33 --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/big.go @@ -0,0 +1,31 @@ +package humanize + +import ( + "math/big" +) + +// order of magnitude (to a max order) +func oomm(n, b *big.Int, maxmag int) (float64, int) { + mag := 0 + m := &big.Int{} + for n.Cmp(b) >= 0 { + n.DivMod(n, b, m) + mag++ + if mag == maxmag && maxmag >= 0 { + break + } + } + return float64(n.Int64()) + (float64(m.Int64()) / float64(b.Int64())), mag +} + +// total order of magnitude +// (same as above, but with no upper limit) +func oom(n, b *big.Int) (float64, int) { + mag := 0 + m := &big.Int{} + for n.Cmp(b) >= 0 { + n.DivMod(n, b, m) + mag++ + } + return float64(n.Int64()) + (float64(m.Int64()) / float64(b.Int64())), mag +} diff --git a/vendor/github.com/dustin/go-humanize/bigbytes.go b/vendor/github.com/dustin/go-humanize/bigbytes.go new file mode 100644 index 0000000..3b015fd --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/bigbytes.go @@ -0,0 +1,189 @@ +package humanize + +import ( + "fmt" + "math/big" + "strings" + "unicode" +) + +var ( + bigIECExp = big.NewInt(1024) + + // BigByte is one byte in bit.Ints + BigByte = big.NewInt(1) + // BigKiByte is 1,024 bytes in bit.Ints + BigKiByte = (&big.Int{}).Mul(BigByte, bigIECExp) + // BigMiByte is 1,024 k bytes in bit.Ints + BigMiByte = (&big.Int{}).Mul(BigKiByte, bigIECExp) + // BigGiByte is 1,024 m bytes in bit.Ints + BigGiByte = (&big.Int{}).Mul(BigMiByte, bigIECExp) + // BigTiByte is 1,024 g bytes in bit.Ints + BigTiByte = (&big.Int{}).Mul(BigGiByte, bigIECExp) + // BigPiByte is 1,024 t bytes in bit.Ints + BigPiByte = (&big.Int{}).Mul(BigTiByte, bigIECExp) + // BigEiByte is 1,024 p bytes in bit.Ints + BigEiByte = (&big.Int{}).Mul(BigPiByte, bigIECExp) + // BigZiByte is 1,024 e bytes in bit.Ints + BigZiByte = (&big.Int{}).Mul(BigEiByte, bigIECExp) + // BigYiByte is 1,024 z bytes in bit.Ints + BigYiByte = (&big.Int{}).Mul(BigZiByte, bigIECExp) + // BigRiByte is 1,024 y bytes in bit.Ints + BigRiByte = (&big.Int{}).Mul(BigYiByte, bigIECExp) + // BigQiByte is 1,024 r bytes in bit.Ints + BigQiByte = (&big.Int{}).Mul(BigRiByte, bigIECExp) +) + +var ( + bigSIExp = big.NewInt(1000) + + // BigSIByte is one SI byte in big.Ints + BigSIByte = big.NewInt(1) + // BigKByte is 1,000 SI bytes in big.Ints + BigKByte = (&big.Int{}).Mul(BigSIByte, bigSIExp) + // BigMByte is 1,000 SI k bytes in big.Ints + BigMByte = (&big.Int{}).Mul(BigKByte, bigSIExp) + // BigGByte is 1,000 SI m bytes in big.Ints + 
BigGByte = (&big.Int{}).Mul(BigMByte, bigSIExp) + // BigTByte is 1,000 SI g bytes in big.Ints + BigTByte = (&big.Int{}).Mul(BigGByte, bigSIExp) + // BigPByte is 1,000 SI t bytes in big.Ints + BigPByte = (&big.Int{}).Mul(BigTByte, bigSIExp) + // BigEByte is 1,000 SI p bytes in big.Ints + BigEByte = (&big.Int{}).Mul(BigPByte, bigSIExp) + // BigZByte is 1,000 SI e bytes in big.Ints + BigZByte = (&big.Int{}).Mul(BigEByte, bigSIExp) + // BigYByte is 1,000 SI z bytes in big.Ints + BigYByte = (&big.Int{}).Mul(BigZByte, bigSIExp) + // BigRByte is 1,000 SI y bytes in big.Ints + BigRByte = (&big.Int{}).Mul(BigYByte, bigSIExp) + // BigQByte is 1,000 SI r bytes in big.Ints + BigQByte = (&big.Int{}).Mul(BigRByte, bigSIExp) +) + +var bigBytesSizeTable = map[string]*big.Int{ + "b": BigByte, + "kib": BigKiByte, + "kb": BigKByte, + "mib": BigMiByte, + "mb": BigMByte, + "gib": BigGiByte, + "gb": BigGByte, + "tib": BigTiByte, + "tb": BigTByte, + "pib": BigPiByte, + "pb": BigPByte, + "eib": BigEiByte, + "eb": BigEByte, + "zib": BigZiByte, + "zb": BigZByte, + "yib": BigYiByte, + "yb": BigYByte, + "rib": BigRiByte, + "rb": BigRByte, + "qib": BigQiByte, + "qb": BigQByte, + // Without suffix + "": BigByte, + "ki": BigKiByte, + "k": BigKByte, + "mi": BigMiByte, + "m": BigMByte, + "gi": BigGiByte, + "g": BigGByte, + "ti": BigTiByte, + "t": BigTByte, + "pi": BigPiByte, + "p": BigPByte, + "ei": BigEiByte, + "e": BigEByte, + "z": BigZByte, + "zi": BigZiByte, + "y": BigYByte, + "yi": BigYiByte, + "r": BigRByte, + "ri": BigRiByte, + "q": BigQByte, + "qi": BigQiByte, +} + +var ten = big.NewInt(10) + +func humanateBigBytes(s, base *big.Int, sizes []string) string { + if s.Cmp(ten) < 0 { + return fmt.Sprintf("%d B", s) + } + c := (&big.Int{}).Set(s) + val, mag := oomm(c, base, len(sizes)-1) + suffix := sizes[mag] + f := "%.0f %s" + if val < 10 { + f = "%.1f %s" + } + + return fmt.Sprintf(f, val, suffix) + +} + +// BigBytes produces a human readable representation of an SI size. +// +// See also: ParseBigBytes. +// +// BigBytes(82854982) -> 83 MB +func BigBytes(s *big.Int) string { + sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB", "RB", "QB"} + return humanateBigBytes(s, bigSIExp, sizes) +} + +// BigIBytes produces a human readable representation of an IEC size. +// +// See also: ParseBigBytes. +// +// BigIBytes(82854982) -> 79 MiB +func BigIBytes(s *big.Int) string { + sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB", "RiB", "QiB"} + return humanateBigBytes(s, bigIECExp, sizes) +} + +// ParseBigBytes parses a string representation of bytes into the number +// of bytes it represents. +// +// See also: BigBytes, BigIBytes. +// +// ParseBigBytes("42 MB") -> 42000000, nil +// ParseBigBytes("42 mib") -> 44040192, nil +func ParseBigBytes(s string) (*big.Int, error) { + lastDigit := 0 + hasComma := false + for _, r := range s { + if !(unicode.IsDigit(r) || r == '.' 
|| r == ',') { + break + } + if r == ',' { + hasComma = true + } + lastDigit++ + } + + num := s[:lastDigit] + if hasComma { + num = strings.Replace(num, ",", "", -1) + } + + val := &big.Rat{} + _, err := fmt.Sscanf(num, "%f", val) + if err != nil { + return nil, err + } + + extra := strings.ToLower(strings.TrimSpace(s[lastDigit:])) + if m, ok := bigBytesSizeTable[extra]; ok { + mv := (&big.Rat{}).SetInt(m) + val.Mul(val, mv) + rv := &big.Int{} + rv.Div(val.Num(), val.Denom()) + return rv, nil + } + + return nil, fmt.Errorf("unhandled size name: %v", extra) +} diff --git a/vendor/github.com/dustin/go-humanize/bytes.go b/vendor/github.com/dustin/go-humanize/bytes.go new file mode 100644 index 0000000..0b498f4 --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/bytes.go @@ -0,0 +1,143 @@ +package humanize + +import ( + "fmt" + "math" + "strconv" + "strings" + "unicode" +) + +// IEC Sizes. +// kibis of bits +const ( + Byte = 1 << (iota * 10) + KiByte + MiByte + GiByte + TiByte + PiByte + EiByte +) + +// SI Sizes. +const ( + IByte = 1 + KByte = IByte * 1000 + MByte = KByte * 1000 + GByte = MByte * 1000 + TByte = GByte * 1000 + PByte = TByte * 1000 + EByte = PByte * 1000 +) + +var bytesSizeTable = map[string]uint64{ + "b": Byte, + "kib": KiByte, + "kb": KByte, + "mib": MiByte, + "mb": MByte, + "gib": GiByte, + "gb": GByte, + "tib": TiByte, + "tb": TByte, + "pib": PiByte, + "pb": PByte, + "eib": EiByte, + "eb": EByte, + // Without suffix + "": Byte, + "ki": KiByte, + "k": KByte, + "mi": MiByte, + "m": MByte, + "gi": GiByte, + "g": GByte, + "ti": TiByte, + "t": TByte, + "pi": PiByte, + "p": PByte, + "ei": EiByte, + "e": EByte, +} + +func logn(n, b float64) float64 { + return math.Log(n) / math.Log(b) +} + +func humanateBytes(s uint64, base float64, sizes []string) string { + if s < 10 { + return fmt.Sprintf("%d B", s) + } + e := math.Floor(logn(float64(s), base)) + suffix := sizes[int(e)] + val := math.Floor(float64(s)/math.Pow(base, e)*10+0.5) / 10 + f := "%.0f %s" + if val < 10 { + f = "%.1f %s" + } + + return fmt.Sprintf(f, val, suffix) +} + +// Bytes produces a human readable representation of an SI size. +// +// See also: ParseBytes. +// +// Bytes(82854982) -> 83 MB +func Bytes(s uint64) string { + sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB"} + return humanateBytes(s, 1000, sizes) +} + +// IBytes produces a human readable representation of an IEC size. +// +// See also: ParseBytes. +// +// IBytes(82854982) -> 79 MiB +func IBytes(s uint64) string { + sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB"} + return humanateBytes(s, 1024, sizes) +} + +// ParseBytes parses a string representation of bytes into the number +// of bytes it represents. +// +// See Also: Bytes, IBytes. +// +// ParseBytes("42 MB") -> 42000000, nil +// ParseBytes("42 mib") -> 44040192, nil +func ParseBytes(s string) (uint64, error) { + lastDigit := 0 + hasComma := false + for _, r := range s { + if !(unicode.IsDigit(r) || r == '.' 
|| r == ',') { + break + } + if r == ',' { + hasComma = true + } + lastDigit++ + } + + num := s[:lastDigit] + if hasComma { + num = strings.Replace(num, ",", "", -1) + } + + f, err := strconv.ParseFloat(num, 64) + if err != nil { + return 0, err + } + + extra := strings.ToLower(strings.TrimSpace(s[lastDigit:])) + if m, ok := bytesSizeTable[extra]; ok { + f *= float64(m) + if f >= math.MaxUint64 { + return 0, fmt.Errorf("too large: %v", s) + } + return uint64(f), nil + } + + return 0, fmt.Errorf("unhandled size name: %v", extra) +} diff --git a/vendor/github.com/dustin/go-humanize/comma.go b/vendor/github.com/dustin/go-humanize/comma.go new file mode 100644 index 0000000..520ae3e --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/comma.go @@ -0,0 +1,116 @@ +package humanize + +import ( + "bytes" + "math" + "math/big" + "strconv" + "strings" +) + +// Comma produces a string form of the given number in base 10 with +// commas after every three orders of magnitude. +// +// e.g. Comma(834142) -> 834,142 +func Comma(v int64) string { + sign := "" + + // Min int64 can't be negated to a usable value, so it has to be special cased. + if v == math.MinInt64 { + return "-9,223,372,036,854,775,808" + } + + if v < 0 { + sign = "-" + v = 0 - v + } + + parts := []string{"", "", "", "", "", "", ""} + j := len(parts) - 1 + + for v > 999 { + parts[j] = strconv.FormatInt(v%1000, 10) + switch len(parts[j]) { + case 2: + parts[j] = "0" + parts[j] + case 1: + parts[j] = "00" + parts[j] + } + v = v / 1000 + j-- + } + parts[j] = strconv.Itoa(int(v)) + return sign + strings.Join(parts[j:], ",") +} + +// Commaf produces a string form of the given number in base 10 with +// commas after every three orders of magnitude. +// +// e.g. Commaf(834142.32) -> 834,142.32 +func Commaf(v float64) string { + buf := &bytes.Buffer{} + if v < 0 { + buf.Write([]byte{'-'}) + v = 0 - v + } + + comma := []byte{','} + + parts := strings.Split(strconv.FormatFloat(v, 'f', -1, 64), ".") + pos := 0 + if len(parts[0])%3 != 0 { + pos += len(parts[0]) % 3 + buf.WriteString(parts[0][:pos]) + buf.Write(comma) + } + for ; pos < len(parts[0]); pos += 3 { + buf.WriteString(parts[0][pos : pos+3]) + buf.Write(comma) + } + buf.Truncate(buf.Len() - 1) + + if len(parts) > 1 { + buf.Write([]byte{'.'}) + buf.WriteString(parts[1]) + } + return buf.String() +} + +// CommafWithDigits works like the Commaf but limits the resulting +// string to the given number of decimal places. +// +// e.g. CommafWithDigits(834142.32, 1) -> 834,142.3 +func CommafWithDigits(f float64, decimals int) string { + return stripTrailingDigits(Commaf(f), decimals) +} + +// BigComma produces a string form of the given big.Int in base 10 +// with commas after every three orders of magnitude. 
+func BigComma(b *big.Int) string { + sign := "" + if b.Sign() < 0 { + sign = "-" + b.Abs(b) + } + + athousand := big.NewInt(1000) + c := (&big.Int{}).Set(b) + _, m := oom(c, athousand) + parts := make([]string, m+1) + j := len(parts) - 1 + + mod := &big.Int{} + for b.Cmp(athousand) >= 0 { + b.DivMod(b, athousand, mod) + parts[j] = strconv.FormatInt(mod.Int64(), 10) + switch len(parts[j]) { + case 2: + parts[j] = "0" + parts[j] + case 1: + parts[j] = "00" + parts[j] + } + j-- + } + parts[j] = strconv.Itoa(int(b.Int64())) + return sign + strings.Join(parts[j:], ",") +} diff --git a/vendor/github.com/dustin/go-humanize/commaf.go b/vendor/github.com/dustin/go-humanize/commaf.go new file mode 100644 index 0000000..2bc83a0 --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/commaf.go @@ -0,0 +1,41 @@ +//go:build go1.6 +// +build go1.6 + +package humanize + +import ( + "bytes" + "math/big" + "strings" +) + +// BigCommaf produces a string form of the given big.Float in base 10 +// with commas after every three orders of magnitude. +func BigCommaf(v *big.Float) string { + buf := &bytes.Buffer{} + if v.Sign() < 0 { + buf.Write([]byte{'-'}) + v.Abs(v) + } + + comma := []byte{','} + + parts := strings.Split(v.Text('f', -1), ".") + pos := 0 + if len(parts[0])%3 != 0 { + pos += len(parts[0]) % 3 + buf.WriteString(parts[0][:pos]) + buf.Write(comma) + } + for ; pos < len(parts[0]); pos += 3 { + buf.WriteString(parts[0][pos : pos+3]) + buf.Write(comma) + } + buf.Truncate(buf.Len() - 1) + + if len(parts) > 1 { + buf.Write([]byte{'.'}) + buf.WriteString(parts[1]) + } + return buf.String() +} diff --git a/vendor/github.com/dustin/go-humanize/ftoa.go b/vendor/github.com/dustin/go-humanize/ftoa.go new file mode 100644 index 0000000..bce923f --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/ftoa.go @@ -0,0 +1,49 @@ +package humanize + +import ( + "strconv" + "strings" +) + +func stripTrailingZeros(s string) string { + if !strings.ContainsRune(s, '.') { + return s + } + offset := len(s) - 1 + for offset > 0 { + if s[offset] == '.' { + offset-- + break + } + if s[offset] != '0' { + break + } + offset-- + } + return s[:offset+1] +} + +func stripTrailingDigits(s string, digits int) string { + if i := strings.Index(s, "."); i >= 0 { + if digits <= 0 { + return s[:i] + } + i++ + if i+digits >= len(s) { + return s + } + return s[:i+digits] + } + return s +} + +// Ftoa converts a float to a string with no trailing zeros. +func Ftoa(num float64) string { + return stripTrailingZeros(strconv.FormatFloat(num, 'f', 6, 64)) +} + +// FtoaWithDigits converts a float to a string but limits the resulting string +// to the given number of decimal places, and no trailing zeros. +func FtoaWithDigits(num float64, digits int) string { + return stripTrailingZeros(stripTrailingDigits(strconv.FormatFloat(num, 'f', 6, 64), digits)) +} diff --git a/vendor/github.com/dustin/go-humanize/humanize.go b/vendor/github.com/dustin/go-humanize/humanize.go new file mode 100644 index 0000000..a2c2da3 --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/humanize.go @@ -0,0 +1,8 @@ +/* +Package humanize converts boring ugly numbers to human-friendly strings and back. + +Durations can be turned into strings such as "3 days ago", numbers +representing sizes like 82854982 into useful strings like, "83 MB" or +"79 MiB" (whichever you prefer). 
+*/ +package humanize diff --git a/vendor/github.com/dustin/go-humanize/number.go b/vendor/github.com/dustin/go-humanize/number.go new file mode 100644 index 0000000..6470d0d --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/number.go @@ -0,0 +1,192 @@ +package humanize + +/* +Slightly adapted from the source to fit go-humanize. + +Author: https://github.com/gorhill +Source: https://gist.github.com/gorhill/5285193 + +*/ + +import ( + "math" + "strconv" +) + +var ( + renderFloatPrecisionMultipliers = [...]float64{ + 1, + 10, + 100, + 1000, + 10000, + 100000, + 1000000, + 10000000, + 100000000, + 1000000000, + } + + renderFloatPrecisionRounders = [...]float64{ + 0.5, + 0.05, + 0.005, + 0.0005, + 0.00005, + 0.000005, + 0.0000005, + 0.00000005, + 0.000000005, + 0.0000000005, + } +) + +// FormatFloat produces a formatted number as string based on the following user-specified criteria: +// * thousands separator +// * decimal separator +// * decimal precision +// +// Usage: s := RenderFloat(format, n) +// The format parameter tells how to render the number n. +// +// See examples: http://play.golang.org/p/LXc1Ddm1lJ +// +// Examples of format strings, given n = 12345.6789: +// "#,###.##" => "12,345.67" +// "#,###." => "12,345" +// "#,###" => "12345,678" +// "#\u202F###,##" => "12 345,68" +// "#.###,###### => 12.345,678900 +// "" (aka default format) => 12,345.67 +// +// The highest precision allowed is 9 digits after the decimal symbol. +// There is also a version for integer number, FormatInteger(), +// which is convenient for calls within template. +func FormatFloat(format string, n float64) string { + // Special cases: + // NaN = "NaN" + // +Inf = "+Infinity" + // -Inf = "-Infinity" + if math.IsNaN(n) { + return "NaN" + } + if n > math.MaxFloat64 { + return "Infinity" + } + if n < (0.0 - math.MaxFloat64) { + return "-Infinity" + } + + // default format + precision := 2 + decimalStr := "." 
+ thousandStr := "," + positiveStr := "" + negativeStr := "-" + + if len(format) > 0 { + format := []rune(format) + + // If there is an explicit format directive, + // then default values are these: + precision = 9 + thousandStr = "" + + // collect indices of meaningful formatting directives + formatIndx := []int{} + for i, char := range format { + if char != '#' && char != '0' { + formatIndx = append(formatIndx, i) + } + } + + if len(formatIndx) > 0 { + // Directive at index 0: + // Must be a '+' + // Raise an error if not the case + // index: 0123456789 + // +0.000,000 + // +000,000.0 + // +0000.00 + // +0000 + if formatIndx[0] == 0 { + if format[formatIndx[0]] != '+' { + panic("RenderFloat(): invalid positive sign directive") + } + positiveStr = "+" + formatIndx = formatIndx[1:] + } + + // Two directives: + // First is thousands separator + // Raise an error if not followed by 3-digit + // 0123456789 + // 0.000,000 + // 000,000.00 + if len(formatIndx) == 2 { + if (formatIndx[1] - formatIndx[0]) != 4 { + panic("RenderFloat(): thousands separator directive must be followed by 3 digit-specifiers") + } + thousandStr = string(format[formatIndx[0]]) + formatIndx = formatIndx[1:] + } + + // One directive: + // Directive is decimal separator + // The number of digit-specifier following the separator indicates wanted precision + // 0123456789 + // 0.00 + // 000,0000 + if len(formatIndx) == 1 { + decimalStr = string(format[formatIndx[0]]) + precision = len(format) - formatIndx[0] - 1 + } + } + } + + // generate sign part + var signStr string + if n >= 0.000000001 { + signStr = positiveStr + } else if n <= -0.000000001 { + signStr = negativeStr + n = -n + } else { + signStr = "" + n = 0.0 + } + + // split number into integer and fractional parts + intf, fracf := math.Modf(n + renderFloatPrecisionRounders[precision]) + + // generate integer part string + intStr := strconv.FormatInt(int64(intf), 10) + + // add thousand separator if required + if len(thousandStr) > 0 { + for i := len(intStr); i > 3; { + i -= 3 + intStr = intStr[:i] + thousandStr + intStr[i:] + } + } + + // no fractional part, we can leave now + if precision == 0 { + return signStr + intStr + } + + // generate fractional part + fracStr := strconv.Itoa(int(fracf * renderFloatPrecisionMultipliers[precision])) + // may need padding + if len(fracStr) < precision { + fracStr = "000000000000000"[:precision-len(fracStr)] + fracStr + } + + return signStr + intStr + decimalStr + fracStr +} + +// FormatInteger produces a formatted number as string. +// See FormatFloat. +func FormatInteger(format string, n int) string { + return FormatFloat(format, float64(n)) +} diff --git a/vendor/github.com/dustin/go-humanize/ordinals.go b/vendor/github.com/dustin/go-humanize/ordinals.go new file mode 100644 index 0000000..43d88a8 --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/ordinals.go @@ -0,0 +1,25 @@ +package humanize + +import "strconv" + +// Ordinal gives you the input number in a rank/ordinal format. 
+// +// Ordinal(3) -> 3rd +func Ordinal(x int) string { + suffix := "th" + switch x % 10 { + case 1: + if x%100 != 11 { + suffix = "st" + } + case 2: + if x%100 != 12 { + suffix = "nd" + } + case 3: + if x%100 != 13 { + suffix = "rd" + } + } + return strconv.Itoa(x) + suffix +} diff --git a/vendor/github.com/dustin/go-humanize/si.go b/vendor/github.com/dustin/go-humanize/si.go new file mode 100644 index 0000000..8b85019 --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/si.go @@ -0,0 +1,127 @@ +package humanize + +import ( + "errors" + "math" + "regexp" + "strconv" +) + +var siPrefixTable = map[float64]string{ + -30: "q", // quecto + -27: "r", // ronto + -24: "y", // yocto + -21: "z", // zepto + -18: "a", // atto + -15: "f", // femto + -12: "p", // pico + -9: "n", // nano + -6: "µ", // micro + -3: "m", // milli + 0: "", + 3: "k", // kilo + 6: "M", // mega + 9: "G", // giga + 12: "T", // tera + 15: "P", // peta + 18: "E", // exa + 21: "Z", // zetta + 24: "Y", // yotta + 27: "R", // ronna + 30: "Q", // quetta +} + +var revSIPrefixTable = revfmap(siPrefixTable) + +// revfmap reverses the map and precomputes the power multiplier +func revfmap(in map[float64]string) map[string]float64 { + rv := map[string]float64{} + for k, v := range in { + rv[v] = math.Pow(10, k) + } + return rv +} + +var riParseRegex *regexp.Regexp + +func init() { + ri := `^([\-0-9.]+)\s?([` + for _, v := range siPrefixTable { + ri += v + } + ri += `]?)(.*)` + + riParseRegex = regexp.MustCompile(ri) +} + +// ComputeSI finds the most appropriate SI prefix for the given number +// and returns the prefix along with the value adjusted to be within +// that prefix. +// +// See also: SI, ParseSI. +// +// e.g. ComputeSI(2.2345e-12) -> (2.2345, "p") +func ComputeSI(input float64) (float64, string) { + if input == 0 { + return 0, "" + } + mag := math.Abs(input) + exponent := math.Floor(logn(mag, 10)) + exponent = math.Floor(exponent/3) * 3 + + value := mag / math.Pow(10, exponent) + + // Handle special case where value is exactly 1000.0 + // Should return 1 M instead of 1000 k + if value == 1000.0 { + exponent += 3 + value = mag / math.Pow(10, exponent) + } + + value = math.Copysign(value, input) + + prefix := siPrefixTable[exponent] + return value, prefix +} + +// SI returns a string with default formatting. +// +// SI uses Ftoa to format float value, removing trailing zeros. +// +// See also: ComputeSI, ParseSI. +// +// e.g. SI(1000000, "B") -> 1 MB +// e.g. SI(2.2345e-12, "F") -> 2.2345 pF +func SI(input float64, unit string) string { + value, prefix := ComputeSI(input) + return Ftoa(value) + " " + prefix + unit +} + +// SIWithDigits works like SI but limits the resulting string to the +// given number of decimal places. +// +// e.g. SIWithDigits(1000000, 0, "B") -> 1 MB +// e.g. SIWithDigits(2.2345e-12, 2, "F") -> 2.23 pF +func SIWithDigits(input float64, decimals int, unit string) string { + value, prefix := ComputeSI(input) + return FtoaWithDigits(value, decimals) + " " + prefix + unit +} + +var errInvalid = errors.New("invalid input") + +// ParseSI parses an SI string back into the number and unit. +// +// See also: SI, ComputeSI. +// +// e.g. 
ParseSI("2.2345 pF") -> (2.2345e-12, "F", nil) +func ParseSI(input string) (float64, string, error) { + found := riParseRegex.FindStringSubmatch(input) + if len(found) != 4 { + return 0, "", errInvalid + } + mag := revSIPrefixTable[found[2]] + unit := found[3] + + base, err := strconv.ParseFloat(found[1], 64) + return base * mag, unit, err +} diff --git a/vendor/github.com/dustin/go-humanize/times.go b/vendor/github.com/dustin/go-humanize/times.go new file mode 100644 index 0000000..dd3fbf5 --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/times.go @@ -0,0 +1,117 @@ +package humanize + +import ( + "fmt" + "math" + "sort" + "time" +) + +// Seconds-based time units +const ( + Day = 24 * time.Hour + Week = 7 * Day + Month = 30 * Day + Year = 12 * Month + LongTime = 37 * Year +) + +// Time formats a time into a relative string. +// +// Time(someT) -> "3 weeks ago" +func Time(then time.Time) string { + return RelTime(then, time.Now(), "ago", "from now") +} + +// A RelTimeMagnitude struct contains a relative time point at which +// the relative format of time will switch to a new format string. A +// slice of these in ascending order by their "D" field is passed to +// CustomRelTime to format durations. +// +// The Format field is a string that may contain a "%s" which will be +// replaced with the appropriate signed label (e.g. "ago" or "from +// now") and a "%d" that will be replaced by the quantity. +// +// The DivBy field is the amount of time the time difference must be +// divided by in order to display correctly. +// +// e.g. if D is 2*time.Minute and you want to display "%d minutes %s" +// DivBy should be time.Minute so whatever the duration is will be +// expressed in minutes. +type RelTimeMagnitude struct { + D time.Duration + Format string + DivBy time.Duration +} + +var defaultMagnitudes = []RelTimeMagnitude{ + {time.Second, "now", time.Second}, + {2 * time.Second, "1 second %s", 1}, + {time.Minute, "%d seconds %s", time.Second}, + {2 * time.Minute, "1 minute %s", 1}, + {time.Hour, "%d minutes %s", time.Minute}, + {2 * time.Hour, "1 hour %s", 1}, + {Day, "%d hours %s", time.Hour}, + {2 * Day, "1 day %s", 1}, + {Week, "%d days %s", Day}, + {2 * Week, "1 week %s", 1}, + {Month, "%d weeks %s", Week}, + {2 * Month, "1 month %s", 1}, + {Year, "%d months %s", Month}, + {18 * Month, "1 year %s", 1}, + {2 * Year, "2 years %s", 1}, + {LongTime, "%d years %s", Year}, + {math.MaxInt64, "a long while %s", 1}, +} + +// RelTime formats a time into a relative string. +// +// It takes two times and two labels. In addition to the generic time +// delta string (e.g. 5 minutes), the labels are used applied so that +// the label corresponding to the smaller time is applied. +// +// RelTime(timeInPast, timeInFuture, "earlier", "later") -> "3 weeks earlier" +func RelTime(a, b time.Time, albl, blbl string) string { + return CustomRelTime(a, b, albl, blbl, defaultMagnitudes) +} + +// CustomRelTime formats a time into a relative string. +// +// It takes two times two labels and a table of relative time formats. +// In addition to the generic time delta string (e.g. 5 minutes), the +// labels are used applied so that the label corresponding to the +// smaller time is applied. 
+func CustomRelTime(a, b time.Time, albl, blbl string, magnitudes []RelTimeMagnitude) string { + lbl := albl + diff := b.Sub(a) + + if a.After(b) { + lbl = blbl + diff = a.Sub(b) + } + + n := sort.Search(len(magnitudes), func(i int) bool { + return magnitudes[i].D > diff + }) + + if n >= len(magnitudes) { + n = len(magnitudes) - 1 + } + mag := magnitudes[n] + args := []interface{}{} + escaped := false + for _, ch := range mag.Format { + if escaped { + switch ch { + case 's': + args = append(args, lbl) + case 'd': + args = append(args, diff/mag.DivBy) + } + escaped = false + } else { + escaped = ch == '%' + } + } + return fmt.Sprintf(mag.Format, args...) +} diff --git a/vendor/github.com/felixge/httpsnoop/.gitignore b/vendor/github.com/felixge/httpsnoop/.gitignore new file mode 100644 index 0000000..e69de29 diff --git a/vendor/github.com/felixge/httpsnoop/.travis.yml b/vendor/github.com/felixge/httpsnoop/.travis.yml new file mode 100644 index 0000000..bfc4212 --- /dev/null +++ b/vendor/github.com/felixge/httpsnoop/.travis.yml @@ -0,0 +1,6 @@ +language: go + +go: + - 1.6 + - 1.7 + - 1.8 diff --git a/vendor/github.com/felixge/httpsnoop/LICENSE.txt b/vendor/github.com/felixge/httpsnoop/LICENSE.txt new file mode 100644 index 0000000..e028b46 --- /dev/null +++ b/vendor/github.com/felixge/httpsnoop/LICENSE.txt @@ -0,0 +1,19 @@ +Copyright (c) 2016 Felix Geisendörfer (felix@debuggable.com) + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + THE SOFTWARE. diff --git a/vendor/github.com/felixge/httpsnoop/Makefile b/vendor/github.com/felixge/httpsnoop/Makefile new file mode 100644 index 0000000..2d84889 --- /dev/null +++ b/vendor/github.com/felixge/httpsnoop/Makefile @@ -0,0 +1,10 @@ +.PHONY: ci generate clean + +ci: clean generate + go test -v ./... + +generate: + go generate . + +clean: + rm -rf *_generated*.go diff --git a/vendor/github.com/felixge/httpsnoop/README.md b/vendor/github.com/felixge/httpsnoop/README.md new file mode 100644 index 0000000..ddcecd1 --- /dev/null +++ b/vendor/github.com/felixge/httpsnoop/README.md @@ -0,0 +1,95 @@ +# httpsnoop + +Package httpsnoop provides an easy way to capture http related metrics (i.e. +response time, bytes written, and http status code) from your application's +http.Handlers. + +Doing this requires non-trivial wrapping of the http.ResponseWriter interface, +which is also exposed for users interested in a more low-level API. 
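+
+Editorial aside, not part of the upstream README: the "more low-level API" mentioned above is the `Hooks`/`Wrap` pair defined in the generated wrap files later in this patch. A minimal sketch of using it directly — the handler names here (`statusLogger`) are hypothetical; only `httpsnoop.Wrap`, `httpsnoop.Hooks`, and `httpsnoop.WriteHeaderFunc` come from the package itself:
+
+```go
+package main
+
+import (
+	"log"
+	"net/http"
+
+	"github.com/felixge/httpsnoop"
+)
+
+// statusLogger wraps next so every explicit WriteHeader call is logged before
+// being forwarded to the real ResponseWriter.
+func statusLogger(next http.Handler) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		hooks := httpsnoop.Hooks{
+			WriteHeader: func(fn httpsnoop.WriteHeaderFunc) httpsnoop.WriteHeaderFunc {
+				return func(code int) {
+					log.Printf("%s %s -> %d", r.Method, r.URL.Path, code)
+					fn(code) // forward to the underlying WriteHeader
+				}
+			},
+		}
+		// Wrap returns a ResponseWriter that exposes the same optional
+		// interfaces (Flusher, Hijacker, io.ReaderFrom, ...) as w.
+		next.ServeHTTP(httpsnoop.Wrap(w, hooks), r)
+	})
+}
+
+func main() {
+	http.Handle("/", statusLogger(http.FileServer(http.Dir("."))))
+	log.Fatal(http.ListenAndServe(":8080", nil))
+}
+```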
+ +[![GoDoc](https://godoc.org/github.com/felixge/httpsnoop?status.svg)](https://godoc.org/github.com/felixge/httpsnoop) +[![Build Status](https://travis-ci.org/felixge/httpsnoop.svg?branch=master)](https://travis-ci.org/felixge/httpsnoop) + +## Usage Example + +```go +// myH is your app's http handler, perhaps a http.ServeMux or similar. +var myH http.Handler +// wrappedH wraps myH in order to log every request. +wrappedH := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + m := httpsnoop.CaptureMetrics(myH, w, r) + log.Printf( + "%s %s (code=%d dt=%s written=%d)", + r.Method, + r.URL, + m.Code, + m.Duration, + m.Written, + ) +}) +http.ListenAndServe(":8080", wrappedH) +``` + +## Why this package exists + +Instrumenting an application's http.Handler is surprisingly difficult. + +However if you google for e.g. "capture ResponseWriter status code" you'll find +lots of advise and code examples that suggest it to be a fairly trivial +undertaking. Unfortunately everything I've seen so far has a high chance of +breaking your application. + +The main problem is that a `http.ResponseWriter` often implements additional +interfaces such as `http.Flusher`, `http.CloseNotifier`, `http.Hijacker`, `http.Pusher`, and +`io.ReaderFrom`. So the naive approach of just wrapping `http.ResponseWriter` +in your own struct that also implements the `http.ResponseWriter` interface +will hide the additional interfaces mentioned above. This has a high change of +introducing subtle bugs into any non-trivial application. + +Another approach I've seen people take is to return a struct that implements +all of the interfaces above. However, that's also problematic, because it's +difficult to fake some of these interfaces behaviors when the underlying +`http.ResponseWriter` doesn't have an implementation. It's also dangerous, +because an application may choose to operate differently, merely because it +detects the presence of these additional interfaces. + +This package solves this problem by checking which additional interfaces a +`http.ResponseWriter` implements, returning a wrapped version implementing the +exact same set of interfaces. + +Additionally this package properly handles edge cases such as `WriteHeader` not +being called, or called more than once, as well as concurrent calls to +`http.ResponseWriter` methods, and even calls happening after the wrapped +`ServeHTTP` has already returned. + +Unfortunately this package is not perfect either. It's possible that it is +still missing some interfaces provided by the go core (let me know if you find +one), and it won't work for applications adding their own interfaces into the +mix. You can however use `httpsnoop.Unwrap(w)` to access the underlying +`http.ResponseWriter` and type-assert the result to its other interfaces. + +However, hopefully the explanation above has sufficiently scared you of rolling +your own solution to this problem. httpsnoop may still break your application, +but at least it tries to avoid it as much as possible. + +Anyway, the real problem here is that smuggling additional interfaces inside +`http.ResponseWriter` is a problematic design choice, but it probably goes as +deep as the Go language specification itself. But that's okay, I still prefer +Go over the alternatives ;). + +## Performance + +``` +BenchmarkBaseline-8 20000 94912 ns/op +BenchmarkCaptureMetrics-8 20000 95461 ns/op +``` + +As you can see, using `CaptureMetrics` on a vanilla http.Handler introduces an +overhead of ~500 ns per http request on my machine. 
However, the margin of +error appears to be larger than that, therefor it should be reasonable to +assume that the overhead introduced by `CaptureMetrics` is absolutely +negligible. + +## License + +MIT diff --git a/vendor/github.com/felixge/httpsnoop/capture_metrics.go b/vendor/github.com/felixge/httpsnoop/capture_metrics.go new file mode 100644 index 0000000..b77cc7c --- /dev/null +++ b/vendor/github.com/felixge/httpsnoop/capture_metrics.go @@ -0,0 +1,86 @@ +package httpsnoop + +import ( + "io" + "net/http" + "time" +) + +// Metrics holds metrics captured from CaptureMetrics. +type Metrics struct { + // Code is the first http response code passed to the WriteHeader func of + // the ResponseWriter. If no such call is made, a default code of 200 is + // assumed instead. + Code int + // Duration is the time it took to execute the handler. + Duration time.Duration + // Written is the number of bytes successfully written by the Write or + // ReadFrom function of the ResponseWriter. ResponseWriters may also write + // data to their underlaying connection directly (e.g. headers), but those + // are not tracked. Therefor the number of Written bytes will usually match + // the size of the response body. + Written int64 +} + +// CaptureMetrics wraps the given hnd, executes it with the given w and r, and +// returns the metrics it captured from it. +func CaptureMetrics(hnd http.Handler, w http.ResponseWriter, r *http.Request) Metrics { + return CaptureMetricsFn(w, func(ww http.ResponseWriter) { + hnd.ServeHTTP(ww, r) + }) +} + +// CaptureMetricsFn wraps w and calls fn with the wrapped w and returns the +// resulting metrics. This is very similar to CaptureMetrics (which is just +// sugar on top of this func), but is a more usable interface if your +// application doesn't use the Go http.Handler interface. +func CaptureMetricsFn(w http.ResponseWriter, fn func(http.ResponseWriter)) Metrics { + m := Metrics{Code: http.StatusOK} + m.CaptureMetrics(w, fn) + return m +} + +// CaptureMetrics wraps w and calls fn with the wrapped w and updates +// Metrics m with the resulting metrics. This is similar to CaptureMetricsFn, +// but allows one to customize starting Metrics object. +func (m *Metrics) CaptureMetrics(w http.ResponseWriter, fn func(http.ResponseWriter)) { + var ( + start = time.Now() + headerWritten bool + hooks = Hooks{ + WriteHeader: func(next WriteHeaderFunc) WriteHeaderFunc { + return func(code int) { + next(code) + + if !headerWritten { + m.Code = code + headerWritten = true + } + } + }, + + Write: func(next WriteFunc) WriteFunc { + return func(p []byte) (int, error) { + n, err := next(p) + + m.Written += int64(n) + headerWritten = true + return n, err + } + }, + + ReadFrom: func(next ReadFromFunc) ReadFromFunc { + return func(src io.Reader) (int64, error) { + n, err := next(src) + + headerWritten = true + m.Written += n + return n, err + } + }, + } + ) + + fn(Wrap(w, hooks)) + m.Duration += time.Since(start) +} diff --git a/vendor/github.com/felixge/httpsnoop/docs.go b/vendor/github.com/felixge/httpsnoop/docs.go new file mode 100644 index 0000000..203c35b --- /dev/null +++ b/vendor/github.com/felixge/httpsnoop/docs.go @@ -0,0 +1,10 @@ +// Package httpsnoop provides an easy way to capture http related metrics (i.e. +// response time, bytes written, and http status code) from your application's +// http.Handlers. +// +// Doing this requires non-trivial wrapping of the http.ResponseWriter +// interface, which is also exposed for users interested in a more low-level +// API. 
+package httpsnoop + +//go:generate go run codegen/main.go diff --git a/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go b/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go new file mode 100644 index 0000000..31cbdfb --- /dev/null +++ b/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go @@ -0,0 +1,436 @@ +// +build go1.8 +// Code generated by "httpsnoop/codegen"; DO NOT EDIT + +package httpsnoop + +import ( + "bufio" + "io" + "net" + "net/http" +) + +// HeaderFunc is part of the http.ResponseWriter interface. +type HeaderFunc func() http.Header + +// WriteHeaderFunc is part of the http.ResponseWriter interface. +type WriteHeaderFunc func(code int) + +// WriteFunc is part of the http.ResponseWriter interface. +type WriteFunc func(b []byte) (int, error) + +// FlushFunc is part of the http.Flusher interface. +type FlushFunc func() + +// CloseNotifyFunc is part of the http.CloseNotifier interface. +type CloseNotifyFunc func() <-chan bool + +// HijackFunc is part of the http.Hijacker interface. +type HijackFunc func() (net.Conn, *bufio.ReadWriter, error) + +// ReadFromFunc is part of the io.ReaderFrom interface. +type ReadFromFunc func(src io.Reader) (int64, error) + +// PushFunc is part of the http.Pusher interface. +type PushFunc func(target string, opts *http.PushOptions) error + +// Hooks defines a set of method interceptors for methods included in +// http.ResponseWriter as well as some others. You can think of them as +// middleware for the function calls they target. See Wrap for more details. +type Hooks struct { + Header func(HeaderFunc) HeaderFunc + WriteHeader func(WriteHeaderFunc) WriteHeaderFunc + Write func(WriteFunc) WriteFunc + Flush func(FlushFunc) FlushFunc + CloseNotify func(CloseNotifyFunc) CloseNotifyFunc + Hijack func(HijackFunc) HijackFunc + ReadFrom func(ReadFromFunc) ReadFromFunc + Push func(PushFunc) PushFunc +} + +// Wrap returns a wrapped version of w that provides the exact same interface +// as w. Specifically if w implements any combination of: +// +// - http.Flusher +// - http.CloseNotifier +// - http.Hijacker +// - io.ReaderFrom +// - http.Pusher +// +// The wrapped version will implement the exact same combination. If no hooks +// are set, the wrapped version also behaves exactly as w. Hooks targeting +// methods not supported by w are ignored. Any other hooks will intercept the +// method they target and may modify the call's arguments and/or return values. +// The CaptureMetrics implementation serves as a working example for how the +// hooks can be used. 
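+//
+// A minimal hedged sketch (not from the upstream docs) of a byte-counting
+// hook:
+//
+//	var written int64
+//	ww := Wrap(w, Hooks{
+//		Write: func(next WriteFunc) WriteFunc {
+//			return func(p []byte) (int, error) {
+//				n, err := next(p)
+//				written += int64(n)
+//				return n, err
+//			}
+//		},
+//	})
+//	handler.ServeHTTP(ww, r) // handler, w, r are assumed to be in scope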
+func Wrap(w http.ResponseWriter, hooks Hooks) http.ResponseWriter { + rw := &rw{w: w, h: hooks} + _, i0 := w.(http.Flusher) + _, i1 := w.(http.CloseNotifier) + _, i2 := w.(http.Hijacker) + _, i3 := w.(io.ReaderFrom) + _, i4 := w.(http.Pusher) + switch { + // combination 1/32 + case !i0 && !i1 && !i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + }{rw, rw} + // combination 2/32 + case !i0 && !i1 && !i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Pusher + }{rw, rw, rw} + // combination 3/32 + case !i0 && !i1 && !i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + io.ReaderFrom + }{rw, rw, rw} + // combination 4/32 + case !i0 && !i1 && !i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw} + // combination 5/32 + case !i0 && !i1 && i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Hijacker + }{rw, rw, rw} + // combination 6/32 + case !i0 && !i1 && i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Hijacker + http.Pusher + }{rw, rw, rw, rw} + // combination 7/32 + case !i0 && !i1 && i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw} + // combination 8/32 + case !i0 && !i1 && i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Hijacker + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw, rw} + // combination 9/32 + case !i0 && i1 && !i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + }{rw, rw, rw} + // combination 10/32 + case !i0 && i1 && !i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + http.Pusher + }{rw, rw, rw, rw} + // combination 11/32 + case !i0 && i1 && !i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + io.ReaderFrom + }{rw, rw, rw, rw} + // combination 12/32 + case !i0 && i1 && !i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw, rw} + // combination 13/32 + case !i0 && i1 && i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + http.Hijacker + }{rw, rw, rw, rw} + // combination 14/32 + case !i0 && i1 && i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + http.Hijacker + http.Pusher + }{rw, rw, rw, rw, rw} + // combination 15/32 + case !i0 && i1 && i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw, rw} + // combination 16/32 + case !i0 && i1 && i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + http.Hijacker + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw, rw, rw} + // combination 17/32 + case i0 && !i1 && !i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + }{rw, rw, rw} + // combination 18/32 + case i0 && !i1 && !i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.Pusher + }{rw, rw, rw, rw} + // combination 19/32 + case i0 && !i1 && !i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + io.ReaderFrom + }{rw, rw, rw, rw} + // combination 20/32 + case i0 && !i1 && !i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + io.ReaderFrom + http.Pusher + }{rw, rw, rw, 
rw, rw} + // combination 21/32 + case i0 && !i1 && i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.Hijacker + }{rw, rw, rw, rw} + // combination 22/32 + case i0 && !i1 && i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.Hijacker + http.Pusher + }{rw, rw, rw, rw, rw} + // combination 23/32 + case i0 && !i1 && i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw, rw} + // combination 24/32 + case i0 && !i1 && i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.Hijacker + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw, rw, rw} + // combination 25/32 + case i0 && i1 && !i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + }{rw, rw, rw, rw} + // combination 26/32 + case i0 && i1 && !i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + http.Pusher + }{rw, rw, rw, rw, rw} + // combination 27/32 + case i0 && i1 && !i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + io.ReaderFrom + }{rw, rw, rw, rw, rw} + // combination 28/32 + case i0 && i1 && !i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw, rw, rw} + // combination 29/32 + case i0 && i1 && i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + http.Hijacker + }{rw, rw, rw, rw, rw} + // combination 30/32 + case i0 && i1 && i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + http.Hijacker + http.Pusher + }{rw, rw, rw, rw, rw, rw} + // combination 31/32 + case i0 && i1 && i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw, rw, rw} + // combination 32/32 + case i0 && i1 && i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + http.Hijacker + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw, rw, rw, rw} + } + panic("unreachable") +} + +type rw struct { + w http.ResponseWriter + h Hooks +} + +func (w *rw) Unwrap() http.ResponseWriter { + return w.w +} + +func (w *rw) Header() http.Header { + f := w.w.(http.ResponseWriter).Header + if w.h.Header != nil { + f = w.h.Header(f) + } + return f() +} + +func (w *rw) WriteHeader(code int) { + f := w.w.(http.ResponseWriter).WriteHeader + if w.h.WriteHeader != nil { + f = w.h.WriteHeader(f) + } + f(code) +} + +func (w *rw) Write(b []byte) (int, error) { + f := w.w.(http.ResponseWriter).Write + if w.h.Write != nil { + f = w.h.Write(f) + } + return f(b) +} + +func (w *rw) Flush() { + f := w.w.(http.Flusher).Flush + if w.h.Flush != nil { + f = w.h.Flush(f) + } + f() +} + +func (w *rw) CloseNotify() <-chan bool { + f := w.w.(http.CloseNotifier).CloseNotify + if w.h.CloseNotify != nil { + f = w.h.CloseNotify(f) + } + return f() +} + +func (w *rw) Hijack() (net.Conn, *bufio.ReadWriter, error) { + f := w.w.(http.Hijacker).Hijack + if w.h.Hijack != nil { + f = w.h.Hijack(f) + } + return f() +} + +func (w *rw) ReadFrom(src io.Reader) (int64, error) { + f := w.w.(io.ReaderFrom).ReadFrom + if w.h.ReadFrom != nil { + f = w.h.ReadFrom(f) + } + return f(src) +} + +func (w *rw) Push(target string, opts 
*http.PushOptions) error { + f := w.w.(http.Pusher).Push + if w.h.Push != nil { + f = w.h.Push(f) + } + return f(target, opts) +} + +type Unwrapper interface { + Unwrap() http.ResponseWriter +} + +// Unwrap returns the underlying http.ResponseWriter from within zero or more +// layers of httpsnoop wrappers. +func Unwrap(w http.ResponseWriter) http.ResponseWriter { + if rw, ok := w.(Unwrapper); ok { + // recurse until rw.Unwrap() returns a non-Unwrapper + return Unwrap(rw.Unwrap()) + } else { + return w + } +} diff --git a/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go b/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go new file mode 100644 index 0000000..ab99c07 --- /dev/null +++ b/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go @@ -0,0 +1,278 @@ +// +build !go1.8 +// Code generated by "httpsnoop/codegen"; DO NOT EDIT + +package httpsnoop + +import ( + "bufio" + "io" + "net" + "net/http" +) + +// HeaderFunc is part of the http.ResponseWriter interface. +type HeaderFunc func() http.Header + +// WriteHeaderFunc is part of the http.ResponseWriter interface. +type WriteHeaderFunc func(code int) + +// WriteFunc is part of the http.ResponseWriter interface. +type WriteFunc func(b []byte) (int, error) + +// FlushFunc is part of the http.Flusher interface. +type FlushFunc func() + +// CloseNotifyFunc is part of the http.CloseNotifier interface. +type CloseNotifyFunc func() <-chan bool + +// HijackFunc is part of the http.Hijacker interface. +type HijackFunc func() (net.Conn, *bufio.ReadWriter, error) + +// ReadFromFunc is part of the io.ReaderFrom interface. +type ReadFromFunc func(src io.Reader) (int64, error) + +// Hooks defines a set of method interceptors for methods included in +// http.ResponseWriter as well as some others. You can think of them as +// middleware for the function calls they target. See Wrap for more details. +type Hooks struct { + Header func(HeaderFunc) HeaderFunc + WriteHeader func(WriteHeaderFunc) WriteHeaderFunc + Write func(WriteFunc) WriteFunc + Flush func(FlushFunc) FlushFunc + CloseNotify func(CloseNotifyFunc) CloseNotifyFunc + Hijack func(HijackFunc) HijackFunc + ReadFrom func(ReadFromFunc) ReadFromFunc +} + +// Wrap returns a wrapped version of w that provides the exact same interface +// as w. Specifically if w implements any combination of: +// +// - http.Flusher +// - http.CloseNotifier +// - http.Hijacker +// - io.ReaderFrom +// +// The wrapped version will implement the exact same combination. If no hooks +// are set, the wrapped version also behaves exactly as w. Hooks targeting +// methods not supported by w are ignored. Any other hooks will intercept the +// method they target and may modify the call's arguments and/or return values. +// The CaptureMetrics implementation serves as a working example for how the +// hooks can be used. 
+func Wrap(w http.ResponseWriter, hooks Hooks) http.ResponseWriter { + rw := &rw{w: w, h: hooks} + _, i0 := w.(http.Flusher) + _, i1 := w.(http.CloseNotifier) + _, i2 := w.(http.Hijacker) + _, i3 := w.(io.ReaderFrom) + switch { + // combination 1/16 + case !i0 && !i1 && !i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + }{rw, rw} + // combination 2/16 + case !i0 && !i1 && !i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + io.ReaderFrom + }{rw, rw, rw} + // combination 3/16 + case !i0 && !i1 && i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + http.Hijacker + }{rw, rw, rw} + // combination 4/16 + case !i0 && !i1 && i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw} + // combination 5/16 + case !i0 && i1 && !i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + }{rw, rw, rw} + // combination 6/16 + case !i0 && i1 && !i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + io.ReaderFrom + }{rw, rw, rw, rw} + // combination 7/16 + case !i0 && i1 && i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + http.Hijacker + }{rw, rw, rw, rw} + // combination 8/16 + case !i0 && i1 && i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw, rw} + // combination 9/16 + case i0 && !i1 && !i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + }{rw, rw, rw} + // combination 10/16 + case i0 && !i1 && !i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + io.ReaderFrom + }{rw, rw, rw, rw} + // combination 11/16 + case i0 && !i1 && i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.Hijacker + }{rw, rw, rw, rw} + // combination 12/16 + case i0 && !i1 && i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw, rw} + // combination 13/16 + case i0 && i1 && !i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + }{rw, rw, rw, rw} + // combination 14/16 + case i0 && i1 && !i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + io.ReaderFrom + }{rw, rw, rw, rw, rw} + // combination 15/16 + case i0 && i1 && i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + http.Hijacker + }{rw, rw, rw, rw, rw} + // combination 16/16 + case i0 && i1 && i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw, rw, rw} + } + panic("unreachable") +} + +type rw struct { + w http.ResponseWriter + h Hooks +} + +func (w *rw) Unwrap() http.ResponseWriter { + return w.w +} + +func (w *rw) Header() http.Header { + f := w.w.(http.ResponseWriter).Header + if w.h.Header != nil { + f = w.h.Header(f) + } + return f() +} + +func (w *rw) WriteHeader(code int) { + f := w.w.(http.ResponseWriter).WriteHeader + if w.h.WriteHeader != nil { + f = w.h.WriteHeader(f) + } + f(code) +} + +func (w *rw) Write(b []byte) (int, error) { + f := w.w.(http.ResponseWriter).Write + if w.h.Write != nil { + f = w.h.Write(f) + } + return f(b) +} + +func (w *rw) Flush() { + f := w.w.(http.Flusher).Flush + if w.h.Flush != nil { + f = w.h.Flush(f) + } + f() +} + +func (w *rw) CloseNotify() <-chan bool { + f := 
w.w.(http.CloseNotifier).CloseNotify + if w.h.CloseNotify != nil { + f = w.h.CloseNotify(f) + } + return f() +} + +func (w *rw) Hijack() (net.Conn, *bufio.ReadWriter, error) { + f := w.w.(http.Hijacker).Hijack + if w.h.Hijack != nil { + f = w.h.Hijack(f) + } + return f() +} + +func (w *rw) ReadFrom(src io.Reader) (int64, error) { + f := w.w.(io.ReaderFrom).ReadFrom + if w.h.ReadFrom != nil { + f = w.h.ReadFrom(f) + } + return f(src) +} + +type Unwrapper interface { + Unwrap() http.ResponseWriter +} + +// Unwrap returns the underlying http.ResponseWriter from within zero or more +// layers of httpsnoop wrappers. +func Unwrap(w http.ResponseWriter) http.ResponseWriter { + if rw, ok := w.(Unwrapper); ok { + // recurse until rw.Unwrap() returns a non-Unwrapper + return Unwrap(rw.Unwrap()) + } else { + return w + } +} diff --git a/vendor/github.com/google/uuid/CHANGELOG.md b/vendor/github.com/google/uuid/CHANGELOG.md new file mode 100644 index 0000000..7ec5ac7 --- /dev/null +++ b/vendor/github.com/google/uuid/CHANGELOG.md @@ -0,0 +1,41 @@ +# Changelog + +## [1.6.0](https://github.com/google/uuid/compare/v1.5.0...v1.6.0) (2024-01-16) + + +### Features + +* add Max UUID constant ([#149](https://github.com/google/uuid/issues/149)) ([c58770e](https://github.com/google/uuid/commit/c58770eb495f55fe2ced6284f93c5158a62e53e3)) + + +### Bug Fixes + +* fix typo in version 7 uuid documentation ([#153](https://github.com/google/uuid/issues/153)) ([016b199](https://github.com/google/uuid/commit/016b199544692f745ffc8867b914129ecb47ef06)) +* Monotonicity in UUIDv7 ([#150](https://github.com/google/uuid/issues/150)) ([a2b2b32](https://github.com/google/uuid/commit/a2b2b32373ff0b1a312b7fdf6d38a977099698a6)) + +## [1.5.0](https://github.com/google/uuid/compare/v1.4.0...v1.5.0) (2023-12-12) + + +### Features + +* Validate UUID without creating new UUID ([#141](https://github.com/google/uuid/issues/141)) ([9ee7366](https://github.com/google/uuid/commit/9ee7366e66c9ad96bab89139418a713dc584ae29)) + +## [1.4.0](https://github.com/google/uuid/compare/v1.3.1...v1.4.0) (2023-10-26) + + +### Features + +* UUIDs slice type with Strings() convenience method ([#133](https://github.com/google/uuid/issues/133)) ([cd5fbbd](https://github.com/google/uuid/commit/cd5fbbdd02f3e3467ac18940e07e062be1f864b4)) + +### Fixes + +* Clarify that Parse's job is to parse but not necessarily validate strings. (Documents current behavior) + +## [1.3.1](https://github.com/google/uuid/compare/v1.3.0...v1.3.1) (2023-08-18) + + +### Bug Fixes + +* Use .EqualFold() to parse urn prefixed UUIDs ([#118](https://github.com/google/uuid/issues/118)) ([574e687](https://github.com/google/uuid/commit/574e6874943741fb99d41764c705173ada5293f0)) + +## Changelog diff --git a/vendor/github.com/google/uuid/CONTRIBUTING.md b/vendor/github.com/google/uuid/CONTRIBUTING.md new file mode 100644 index 0000000..a502fdc --- /dev/null +++ b/vendor/github.com/google/uuid/CONTRIBUTING.md @@ -0,0 +1,26 @@ +# How to contribute + +We definitely welcome patches and contribution to this project! + +### Tips + +Commits must be formatted according to the [Conventional Commits Specification](https://www.conventionalcommits.org). + +Always try to include a test case! If it is not possible or not necessary, +please explain why in the pull request description. 
+ +### Releasing + +Commits that would precipitate a SemVer change, as described in the Conventional +Commits Specification, will trigger [`release-please`](https://github.com/google-github-actions/release-please-action) +to create a release candidate pull request. Once submitted, `release-please` +will create a release. + +For tips on how to work with `release-please`, see its documentation. + +### Legal requirements + +In order to protect both you and ourselves, you will need to sign the +[Contributor License Agreement](https://cla.developers.google.com/clas). + +You may have already signed it for other Google projects. diff --git a/vendor/github.com/google/uuid/CONTRIBUTORS b/vendor/github.com/google/uuid/CONTRIBUTORS new file mode 100644 index 0000000..b4bb97f --- /dev/null +++ b/vendor/github.com/google/uuid/CONTRIBUTORS @@ -0,0 +1,9 @@ +Paul Borman +bmatsuo +shawnps +theory +jboverfelt +dsymonds +cd1 +wallclockbuilder +dansouza diff --git a/vendor/github.com/google/uuid/LICENSE b/vendor/github.com/google/uuid/LICENSE new file mode 100644 index 0000000..5dc6826 --- /dev/null +++ b/vendor/github.com/google/uuid/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009,2014 Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/google/uuid/README.md b/vendor/github.com/google/uuid/README.md new file mode 100644 index 0000000..3e9a618 --- /dev/null +++ b/vendor/github.com/google/uuid/README.md @@ -0,0 +1,21 @@ +# uuid +The uuid package generates and inspects UUIDs based on +[RFC 4122](https://datatracker.ietf.org/doc/html/rfc4122) +and DCE 1.1: Authentication and Security Services. + +This package is based on the github.com/pborman/uuid package (previously named +code.google.com/p/go-uuid). It differs from these earlier packages in that +a UUID is a 16 byte array rather than a byte slice. One loss due to this +change is the ability to represent an invalid UUID (vs a NIL UUID). 
+ +###### Install +```sh +go get github.com/google/uuid +``` + +###### Documentation +[![Go Reference](https://pkg.go.dev/badge/github.com/google/uuid.svg)](https://pkg.go.dev/github.com/google/uuid) + +Full `go doc` style documentation for the package can be viewed online without +installing this package by using the GoDoc site here: +http://pkg.go.dev/github.com/google/uuid diff --git a/vendor/github.com/google/uuid/dce.go b/vendor/github.com/google/uuid/dce.go new file mode 100644 index 0000000..fa820b9 --- /dev/null +++ b/vendor/github.com/google/uuid/dce.go @@ -0,0 +1,80 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" + "fmt" + "os" +) + +// A Domain represents a Version 2 domain +type Domain byte + +// Domain constants for DCE Security (Version 2) UUIDs. +const ( + Person = Domain(0) + Group = Domain(1) + Org = Domain(2) +) + +// NewDCESecurity returns a DCE Security (Version 2) UUID. +// +// The domain should be one of Person, Group or Org. +// On a POSIX system the id should be the users UID for the Person +// domain and the users GID for the Group. The meaning of id for +// the domain Org or on non-POSIX systems is site defined. +// +// For a given domain/id pair the same token may be returned for up to +// 7 minutes and 10 seconds. +func NewDCESecurity(domain Domain, id uint32) (UUID, error) { + uuid, err := NewUUID() + if err == nil { + uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2 + uuid[9] = byte(domain) + binary.BigEndian.PutUint32(uuid[0:], id) + } + return uuid, err +} + +// NewDCEPerson returns a DCE Security (Version 2) UUID in the person +// domain with the id returned by os.Getuid. +// +// NewDCESecurity(Person, uint32(os.Getuid())) +func NewDCEPerson() (UUID, error) { + return NewDCESecurity(Person, uint32(os.Getuid())) +} + +// NewDCEGroup returns a DCE Security (Version 2) UUID in the group +// domain with the id returned by os.Getgid. +// +// NewDCESecurity(Group, uint32(os.Getgid())) +func NewDCEGroup() (UUID, error) { + return NewDCESecurity(Group, uint32(os.Getgid())) +} + +// Domain returns the domain for a Version 2 UUID. Domains are only defined +// for Version 2 UUIDs. +func (uuid UUID) Domain() Domain { + return Domain(uuid[9]) +} + +// ID returns the id for a Version 2 UUID. IDs are only defined for Version 2 +// UUIDs. +func (uuid UUID) ID() uint32 { + return binary.BigEndian.Uint32(uuid[0:4]) +} + +func (d Domain) String() string { + switch d { + case Person: + return "Person" + case Group: + return "Group" + case Org: + return "Org" + } + return fmt.Sprintf("Domain%d", int(d)) +} diff --git a/vendor/github.com/google/uuid/doc.go b/vendor/github.com/google/uuid/doc.go new file mode 100644 index 0000000..5b8a4b9 --- /dev/null +++ b/vendor/github.com/google/uuid/doc.go @@ -0,0 +1,12 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package uuid generates and inspects UUIDs. +// +// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security +// Services. +// +// A UUID is a 16 byte (128 bit) array. UUIDs may be used as keys to +// maps or compared directly. 
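+//
+// A short usage sketch (illustrative, not part of the upstream docs):
+//
+//	id := uuid.New()             // random (Version 4) UUID; panics on failure
+//	s := id.String()             // canonical xxxxxxxx-xxxx-... form
+//	parsed, err := uuid.Parse(s) // round-trips back to the same value
+//	_ = err
+//	_ = parsed == id             // UUIDs are comparable and work as map keys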
+package uuid diff --git a/vendor/github.com/google/uuid/hash.go b/vendor/github.com/google/uuid/hash.go new file mode 100644 index 0000000..dc60082 --- /dev/null +++ b/vendor/github.com/google/uuid/hash.go @@ -0,0 +1,59 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "crypto/md5" + "crypto/sha1" + "hash" +) + +// Well known namespace IDs and UUIDs +var ( + NameSpaceDNS = Must(Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8")) + NameSpaceURL = Must(Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8")) + NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8")) + NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8")) + Nil UUID // empty UUID, all zeros + + // The Max UUID is special form of UUID that is specified to have all 128 bits set to 1. + Max = UUID{ + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + } +) + +// NewHash returns a new UUID derived from the hash of space concatenated with +// data generated by h. The hash should be at least 16 byte in length. The +// first 16 bytes of the hash are used to form the UUID. The version of the +// UUID will be the lower 4 bits of version. NewHash is used to implement +// NewMD5 and NewSHA1. +func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID { + h.Reset() + h.Write(space[:]) //nolint:errcheck + h.Write(data) //nolint:errcheck + s := h.Sum(nil) + var uuid UUID + copy(uuid[:], s) + uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4) + uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant + return uuid +} + +// NewMD5 returns a new MD5 (Version 3) UUID based on the +// supplied name space and data. It is the same as calling: +// +// NewHash(md5.New(), space, data, 3) +func NewMD5(space UUID, data []byte) UUID { + return NewHash(md5.New(), space, data, 3) +} + +// NewSHA1 returns a new SHA1 (Version 5) UUID based on the +// supplied name space and data. It is the same as calling: +// +// NewHash(sha1.New(), space, data, 5) +func NewSHA1(space UUID, data []byte) UUID { + return NewHash(sha1.New(), space, data, 5) +} diff --git a/vendor/github.com/google/uuid/marshal.go b/vendor/github.com/google/uuid/marshal.go new file mode 100644 index 0000000..14bd340 --- /dev/null +++ b/vendor/github.com/google/uuid/marshal.go @@ -0,0 +1,38 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import "fmt" + +// MarshalText implements encoding.TextMarshaler. +func (uuid UUID) MarshalText() ([]byte, error) { + var js [36]byte + encodeHex(js[:], uuid) + return js[:], nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (uuid *UUID) UnmarshalText(data []byte) error { + id, err := ParseBytes(data) + if err != nil { + return err + } + *uuid = id + return nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (uuid UUID) MarshalBinary() ([]byte, error) { + return uuid[:], nil +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. 
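+//
+// Illustrative round-trip (an editorial sketch, not upstream documentation):
+//
+//	b, _ := id.MarshalBinary() // the 16 raw bytes of id
+//	var out UUID
+//	_ = out.UnmarshalBinary(b) // out now equals id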
+func (uuid *UUID) UnmarshalBinary(data []byte) error { + if len(data) != 16 { + return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) + } + copy(uuid[:], data) + return nil +} diff --git a/vendor/github.com/google/uuid/node.go b/vendor/github.com/google/uuid/node.go new file mode 100644 index 0000000..d651a2b --- /dev/null +++ b/vendor/github.com/google/uuid/node.go @@ -0,0 +1,90 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "sync" +) + +var ( + nodeMu sync.Mutex + ifname string // name of interface being used + nodeID [6]byte // hardware for version 1 UUIDs + zeroID [6]byte // nodeID with only 0's +) + +// NodeInterface returns the name of the interface from which the NodeID was +// derived. The interface "user" is returned if the NodeID was set by +// SetNodeID. +func NodeInterface() string { + defer nodeMu.Unlock() + nodeMu.Lock() + return ifname +} + +// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs. +// If name is "" then the first usable interface found will be used or a random +// Node ID will be generated. If a named interface cannot be found then false +// is returned. +// +// SetNodeInterface never fails when name is "". +func SetNodeInterface(name string) bool { + defer nodeMu.Unlock() + nodeMu.Lock() + return setNodeInterface(name) +} + +func setNodeInterface(name string) bool { + iname, addr := getHardwareInterface(name) // null implementation for js + if iname != "" && addr != nil { + ifname = iname + copy(nodeID[:], addr) + return true + } + + // We found no interfaces with a valid hardware address. If name + // does not specify a specific interface generate a random Node ID + // (section 4.1.6) + if name == "" { + ifname = "random" + randomBits(nodeID[:]) + return true + } + return false +} + +// NodeID returns a slice of a copy of the current Node ID, setting the Node ID +// if not already set. +func NodeID() []byte { + defer nodeMu.Unlock() + nodeMu.Lock() + if nodeID == zeroID { + setNodeInterface("") + } + nid := nodeID + return nid[:] +} + +// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes +// of id are used. If id is less than 6 bytes then false is returned and the +// Node ID is not set. +func SetNodeID(id []byte) bool { + if len(id) < 6 { + return false + } + defer nodeMu.Unlock() + nodeMu.Lock() + copy(nodeID[:], id) + ifname = "user" + return true +} + +// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is +// not valid. The NodeID is only well defined for version 1 and 2 UUIDs. +func (uuid UUID) NodeID() []byte { + var node [6]byte + copy(node[:], uuid[10:]) + return node[:] +} diff --git a/vendor/github.com/google/uuid/node_js.go b/vendor/github.com/google/uuid/node_js.go new file mode 100644 index 0000000..b2a0bc8 --- /dev/null +++ b/vendor/github.com/google/uuid/node_js.go @@ -0,0 +1,12 @@ +// Copyright 2017 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build js + +package uuid + +// getHardwareInterface returns nil values for the JS version of the code. +// This removes the "net" dependency, because it is not used in the browser. +// Using the "net" library inflates the size of the transpiled JS code by 673k bytes. 
+func getHardwareInterface(name string) (string, []byte) { return "", nil } diff --git a/vendor/github.com/google/uuid/node_net.go b/vendor/github.com/google/uuid/node_net.go new file mode 100644 index 0000000..0cbbcdd --- /dev/null +++ b/vendor/github.com/google/uuid/node_net.go @@ -0,0 +1,33 @@ +// Copyright 2017 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !js + +package uuid + +import "net" + +var interfaces []net.Interface // cached list of interfaces + +// getHardwareInterface returns the name and hardware address of interface name. +// If name is "" then the name and hardware address of one of the system's +// interfaces is returned. If no interfaces are found (name does not exist or +// there are no interfaces) then "", nil is returned. +// +// Only addresses of at least 6 bytes are returned. +func getHardwareInterface(name string) (string, []byte) { + if interfaces == nil { + var err error + interfaces, err = net.Interfaces() + if err != nil { + return "", nil + } + } + for _, ifs := range interfaces { + if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) { + return ifs.Name, ifs.HardwareAddr + } + } + return "", nil +} diff --git a/vendor/github.com/google/uuid/null.go b/vendor/github.com/google/uuid/null.go new file mode 100644 index 0000000..d7fcbf2 --- /dev/null +++ b/vendor/github.com/google/uuid/null.go @@ -0,0 +1,118 @@ +// Copyright 2021 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "bytes" + "database/sql/driver" + "encoding/json" + "fmt" +) + +var jsonNull = []byte("null") + +// NullUUID represents a UUID that may be null. +// NullUUID implements the SQL driver.Scanner interface so +// it can be used as a scan destination: +// +// var u uuid.NullUUID +// err := db.QueryRow("SELECT name FROM foo WHERE id=?", id).Scan(&u) +// ... +// if u.Valid { +// // use u.UUID +// } else { +// // NULL value +// } +// +type NullUUID struct { + UUID UUID + Valid bool // Valid is true if UUID is not NULL +} + +// Scan implements the SQL driver.Scanner interface. +func (nu *NullUUID) Scan(value interface{}) error { + if value == nil { + nu.UUID, nu.Valid = Nil, false + return nil + } + + err := nu.UUID.Scan(value) + if err != nil { + nu.Valid = false + return err + } + + nu.Valid = true + return nil +} + +// Value implements the driver Valuer interface. +func (nu NullUUID) Value() (driver.Value, error) { + if !nu.Valid { + return nil, nil + } + // Delegate to UUID Value function + return nu.UUID.Value() +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (nu NullUUID) MarshalBinary() ([]byte, error) { + if nu.Valid { + return nu.UUID[:], nil + } + + return []byte(nil), nil +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (nu *NullUUID) UnmarshalBinary(data []byte) error { + if len(data) != 16 { + return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) + } + copy(nu.UUID[:], data) + nu.Valid = true + return nil +} + +// MarshalText implements encoding.TextMarshaler. +func (nu NullUUID) MarshalText() ([]byte, error) { + if nu.Valid { + return nu.UUID.MarshalText() + } + + return jsonNull, nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. 
+func (nu *NullUUID) UnmarshalText(data []byte) error { + id, err := ParseBytes(data) + if err != nil { + nu.Valid = false + return err + } + nu.UUID = id + nu.Valid = true + return nil +} + +// MarshalJSON implements json.Marshaler. +func (nu NullUUID) MarshalJSON() ([]byte, error) { + if nu.Valid { + return json.Marshal(nu.UUID) + } + + return jsonNull, nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (nu *NullUUID) UnmarshalJSON(data []byte) error { + if bytes.Equal(data, jsonNull) { + *nu = NullUUID{} + return nil // valid null UUID + } + err := json.Unmarshal(data, &nu.UUID) + nu.Valid = err == nil + return err +} diff --git a/vendor/github.com/google/uuid/sql.go b/vendor/github.com/google/uuid/sql.go new file mode 100644 index 0000000..2e02ec0 --- /dev/null +++ b/vendor/github.com/google/uuid/sql.go @@ -0,0 +1,59 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "database/sql/driver" + "fmt" +) + +// Scan implements sql.Scanner so UUIDs can be read from databases transparently. +// Currently, database types that map to string and []byte are supported. Please +// consult database-specific driver documentation for matching types. +func (uuid *UUID) Scan(src interface{}) error { + switch src := src.(type) { + case nil: + return nil + + case string: + // if an empty UUID comes from a table, we return a null UUID + if src == "" { + return nil + } + + // see Parse for required string format + u, err := Parse(src) + if err != nil { + return fmt.Errorf("Scan: %v", err) + } + + *uuid = u + + case []byte: + // if an empty UUID comes from a table, we return a null UUID + if len(src) == 0 { + return nil + } + + // assumes a simple slice of bytes if 16 bytes + // otherwise attempts to parse + if len(src) != 16 { + return uuid.Scan(string(src)) + } + copy((*uuid)[:], src) + + default: + return fmt.Errorf("Scan: unable to scan type %T into UUID", src) + } + + return nil +} + +// Value implements sql.Valuer so that UUIDs can be written to databases +// transparently. Currently, UUIDs map to strings. Please consult +// database-specific driver documentation for matching types. +func (uuid UUID) Value() (driver.Value, error) { + return uuid.String(), nil +} diff --git a/vendor/github.com/google/uuid/time.go b/vendor/github.com/google/uuid/time.go new file mode 100644 index 0000000..c351129 --- /dev/null +++ b/vendor/github.com/google/uuid/time.go @@ -0,0 +1,134 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" + "sync" + "time" +) + +// A Time represents a time as the number of 100's of nanoseconds since 15 Oct +// 1582. +type Time int64 + +const ( + lillian = 2299160 // Julian day of 15 Oct 1582 + unix = 2440587 // Julian day of 1 Jan 1970 + epoch = unix - lillian // Days between epochs + g1582 = epoch * 86400 // seconds between epochs + g1582ns100 = g1582 * 10000000 // 100s of a nanoseconds between epochs +) + +var ( + timeMu sync.Mutex + lasttime uint64 // last time we returned + clockSeq uint16 // clock sequence for this run + + timeNow = time.Now // for testing +) + +// UnixTime converts t the number of seconds and nanoseconds using the Unix +// epoch of 1 Jan 1970. 
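+//
+// Illustrative (assumed) usage for turning the timestamp embedded in a
+// Version 1 UUID into a time.Time:
+//
+//	sec, nsec := someUUID.Time().UnixTime() // someUUID is hypothetical
+//	created := time.Unix(sec, nsec)
+//	_ = created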
+func (t Time) UnixTime() (sec, nsec int64) { + sec = int64(t - g1582ns100) + nsec = (sec % 10000000) * 100 + sec /= 10000000 + return sec, nsec +} + +// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and +// clock sequence as well as adjusting the clock sequence as needed. An error +// is returned if the current time cannot be determined. +func GetTime() (Time, uint16, error) { + defer timeMu.Unlock() + timeMu.Lock() + return getTime() +} + +func getTime() (Time, uint16, error) { + t := timeNow() + + // If we don't have a clock sequence already, set one. + if clockSeq == 0 { + setClockSequence(-1) + } + now := uint64(t.UnixNano()/100) + g1582ns100 + + // If time has gone backwards with this clock sequence then we + // increment the clock sequence + if now <= lasttime { + clockSeq = ((clockSeq + 1) & 0x3fff) | 0x8000 + } + lasttime = now + return Time(now), clockSeq, nil +} + +// ClockSequence returns the current clock sequence, generating one if not +// already set. The clock sequence is only used for Version 1 UUIDs. +// +// The uuid package does not use global static storage for the clock sequence or +// the last time a UUID was generated. Unless SetClockSequence is used, a new +// random clock sequence is generated the first time a clock sequence is +// requested by ClockSequence, GetTime, or NewUUID. (section 4.2.1.1) +func ClockSequence() int { + defer timeMu.Unlock() + timeMu.Lock() + return clockSequence() +} + +func clockSequence() int { + if clockSeq == 0 { + setClockSequence(-1) + } + return int(clockSeq & 0x3fff) +} + +// SetClockSequence sets the clock sequence to the lower 14 bits of seq. Setting to +// -1 causes a new sequence to be generated. +func SetClockSequence(seq int) { + defer timeMu.Unlock() + timeMu.Lock() + setClockSequence(seq) +} + +func setClockSequence(seq int) { + if seq == -1 { + var b [2]byte + randomBits(b[:]) // clock sequence + seq = int(b[0])<<8 | int(b[1]) + } + oldSeq := clockSeq + clockSeq = uint16(seq&0x3fff) | 0x8000 // Set our variant + if oldSeq != clockSeq { + lasttime = 0 + } +} + +// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in +// uuid. The time is only defined for version 1, 2, 6 and 7 UUIDs. +func (uuid UUID) Time() Time { + var t Time + switch uuid.Version() { + case 6: + time := binary.BigEndian.Uint64(uuid[:8]) // Ignore uuid[6] version b0110 + t = Time(time) + case 7: + time := binary.BigEndian.Uint64(uuid[:8]) + t = Time((time>>16)*10000 + g1582ns100) + default: // forward compatible + time := int64(binary.BigEndian.Uint32(uuid[0:4])) + time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32 + time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48 + t = Time(time) + } + return t +} + +// ClockSequence returns the clock sequence encoded in uuid. +// The clock sequence is only well defined for version 1 and 2 UUIDs. +func (uuid UUID) ClockSequence() int { + return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff +} diff --git a/vendor/github.com/google/uuid/util.go b/vendor/github.com/google/uuid/util.go new file mode 100644 index 0000000..5ea6c73 --- /dev/null +++ b/vendor/github.com/google/uuid/util.go @@ -0,0 +1,43 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "io" +) + +// randomBits completely fills slice b with random data. 
+func randomBits(b []byte) { + if _, err := io.ReadFull(rander, b); err != nil { + panic(err.Error()) // rand should never fail + } +} + +// xvalues returns the value of a byte as a hexadecimal digit or 255. +var xvalues = [256]byte{ + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255, + 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, +} + +// xtob converts hex characters x1 and x2 into a byte. +func xtob(x1, x2 byte) (byte, bool) { + b1 := xvalues[x1] + b2 := xvalues[x2] + return (b1 << 4) | b2, b1 != 255 && b2 != 255 +} diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go new file mode 100644 index 0000000..5232b48 --- /dev/null +++ b/vendor/github.com/google/uuid/uuid.go @@ -0,0 +1,365 @@ +// Copyright 2018 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "bytes" + "crypto/rand" + "encoding/hex" + "errors" + "fmt" + "io" + "strings" + "sync" +) + +// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC +// 4122. +type UUID [16]byte + +// A Version represents a UUID's version. +type Version byte + +// A Variant represents a UUID's variant. +type Variant byte + +// Constants returned by Variant. +const ( + Invalid = Variant(iota) // Invalid UUID + RFC4122 // The variant specified in RFC4122 + Reserved // Reserved, NCS backward compatibility. + Microsoft // Reserved, Microsoft Corporation backward compatibility. + Future // Reserved for future definition. +) + +const randPoolSize = 16 * 16 + +var ( + rander = rand.Reader // random function + poolEnabled = false + poolMu sync.Mutex + poolPos = randPoolSize // protected with poolMu + pool [randPoolSize]byte // protected with poolMu +) + +type invalidLengthError struct{ len int } + +func (err invalidLengthError) Error() string { + return fmt.Sprintf("invalid UUID length: %d", err.len) +} + +// IsInvalidLengthError is matcher function for custom error invalidLengthError +func IsInvalidLengthError(err error) bool { + _, ok := err.(invalidLengthError) + return ok +} + +// Parse decodes s into a UUID or returns an error if it cannot be parsed. Both +// the standard UUID forms defined in RFC 4122 +// (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx) are decoded. 
In addition, +// Parse accepts non-standard strings such as the raw hex encoding +// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx and 38 byte "Microsoft style" encodings, +// e.g. {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}. Only the middle 36 bytes are +// examined in the latter case. Parse should not be used to validate strings as +// it parses non-standard encodings as indicated above. +func Parse(s string) (UUID, error) { + var uuid UUID + switch len(s) { + // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 36: + + // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 36 + 9: + if !strings.EqualFold(s[:9], "urn:uuid:") { + return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9]) + } + s = s[9:] + + // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} + case 36 + 2: + s = s[1:] + + // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + case 32: + var ok bool + for i := range uuid { + uuid[i], ok = xtob(s[i*2], s[i*2+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + } + return uuid, nil + default: + return uuid, invalidLengthError{len(s)} + } + // s is now at least 36 bytes long + // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { + return uuid, errors.New("invalid UUID format") + } + for i, x := range [16]int{ + 0, 2, 4, 6, + 9, 11, + 14, 16, + 19, 21, + 24, 26, 28, 30, 32, 34, + } { + v, ok := xtob(s[x], s[x+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + uuid[i] = v + } + return uuid, nil +} + +// ParseBytes is like Parse, except it parses a byte slice instead of a string. +func ParseBytes(b []byte) (UUID, error) { + var uuid UUID + switch len(b) { + case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + if !bytes.EqualFold(b[:9], []byte("urn:uuid:")) { + return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9]) + } + b = b[9:] + case 36 + 2: // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} + b = b[1:] + case 32: // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + var ok bool + for i := 0; i < 32; i += 2 { + uuid[i/2], ok = xtob(b[i], b[i+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + } + return uuid, nil + default: + return uuid, invalidLengthError{len(b)} + } + // s is now at least 36 bytes long + // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + if b[8] != '-' || b[13] != '-' || b[18] != '-' || b[23] != '-' { + return uuid, errors.New("invalid UUID format") + } + for i, x := range [16]int{ + 0, 2, 4, 6, + 9, 11, + 14, 16, + 19, 21, + 24, 26, 28, 30, 32, 34, + } { + v, ok := xtob(b[x], b[x+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + uuid[i] = v + } + return uuid, nil +} + +// MustParse is like Parse but panics if the string cannot be parsed. +// It simplifies safe initialization of global variables holding compiled UUIDs. +func MustParse(s string) UUID { + uuid, err := Parse(s) + if err != nil { + panic(`uuid: Parse(` + s + `): ` + err.Error()) + } + return uuid +} + +// FromBytes creates a new UUID from a byte slice. Returns an error if the slice +// does not have a length of 16. The bytes are copied from the slice. +func FromBytes(b []byte) (uuid UUID, err error) { + err = uuid.UnmarshalBinary(b) + return uuid, err +} + +// Must returns uuid if err is nil and panics otherwise. 
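+//
+// Illustrative (assumed) usage:
+//
+//	id := uuid.Must(uuid.NewRandom())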
+func Must(uuid UUID, err error) UUID { + if err != nil { + panic(err) + } + return uuid +} + +// Validate returns an error if s is not a properly formatted UUID in one of the following formats: +// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx +// {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} +// It returns an error if the format is invalid, otherwise nil. +func Validate(s string) error { + switch len(s) { + // Standard UUID format + case 36: + + // UUID with "urn:uuid:" prefix + case 36 + 9: + if !strings.EqualFold(s[:9], "urn:uuid:") { + return fmt.Errorf("invalid urn prefix: %q", s[:9]) + } + s = s[9:] + + // UUID enclosed in braces + case 36 + 2: + if s[0] != '{' || s[len(s)-1] != '}' { + return fmt.Errorf("invalid bracketed UUID format") + } + s = s[1 : len(s)-1] + + // UUID without hyphens + case 32: + for i := 0; i < len(s); i += 2 { + _, ok := xtob(s[i], s[i+1]) + if !ok { + return errors.New("invalid UUID format") + } + } + + default: + return invalidLengthError{len(s)} + } + + // Check for standard UUID format + if len(s) == 36 { + if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { + return errors.New("invalid UUID format") + } + for _, x := range []int{0, 2, 4, 6, 9, 11, 14, 16, 19, 21, 24, 26, 28, 30, 32, 34} { + if _, ok := xtob(s[x], s[x+1]); !ok { + return errors.New("invalid UUID format") + } + } + } + + return nil +} + +// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +// , or "" if uuid is invalid. +func (uuid UUID) String() string { + var buf [36]byte + encodeHex(buf[:], uuid) + return string(buf[:]) +} + +// URN returns the RFC 2141 URN form of uuid, +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid. +func (uuid UUID) URN() string { + var buf [36 + 9]byte + copy(buf[:], "urn:uuid:") + encodeHex(buf[9:], uuid) + return string(buf[:]) +} + +func encodeHex(dst []byte, uuid UUID) { + hex.Encode(dst, uuid[:4]) + dst[8] = '-' + hex.Encode(dst[9:13], uuid[4:6]) + dst[13] = '-' + hex.Encode(dst[14:18], uuid[6:8]) + dst[18] = '-' + hex.Encode(dst[19:23], uuid[8:10]) + dst[23] = '-' + hex.Encode(dst[24:], uuid[10:]) +} + +// Variant returns the variant encoded in uuid. +func (uuid UUID) Variant() Variant { + switch { + case (uuid[8] & 0xc0) == 0x80: + return RFC4122 + case (uuid[8] & 0xe0) == 0xc0: + return Microsoft + case (uuid[8] & 0xe0) == 0xe0: + return Future + default: + return Reserved + } +} + +// Version returns the version of uuid. +func (uuid UUID) Version() Version { + return Version(uuid[6] >> 4) +} + +func (v Version) String() string { + if v > 15 { + return fmt.Sprintf("BAD_VERSION_%d", v) + } + return fmt.Sprintf("VERSION_%d", v) +} + +func (v Variant) String() string { + switch v { + case RFC4122: + return "RFC4122" + case Reserved: + return "Reserved" + case Microsoft: + return "Microsoft" + case Future: + return "Future" + case Invalid: + return "Invalid" + } + return fmt.Sprintf("BadVariant%d", int(v)) +} + +// SetRand sets the random number generator to r, which implements io.Reader. +// If r.Read returns an error when the package requests random data then +// a panic will be issued. +// +// Calling SetRand with nil sets the random number generator to the default +// generator. +func SetRand(r io.Reader) { + if r == nil { + rander = rand.Reader + return + } + rander = r +} + +// EnableRandPool enables internal randomness pool used for Random +// (Version 4) UUID generation. 
The pool contains random bytes read from +// the random number generator on demand in batches. Enabling the pool +// may improve the UUID generation throughput significantly. +// +// Since the pool is stored on the Go heap, this feature may be a bad fit +// for security sensitive applications. +// +// Both EnableRandPool and DisableRandPool are not thread-safe and should +// only be called when there is no possibility that New or any other +// UUID Version 4 generation function will be called concurrently. +func EnableRandPool() { + poolEnabled = true +} + +// DisableRandPool disables the randomness pool if it was previously +// enabled with EnableRandPool. +// +// Both EnableRandPool and DisableRandPool are not thread-safe and should +// only be called when there is no possibility that New or any other +// UUID Version 4 generation function will be called concurrently. +func DisableRandPool() { + poolEnabled = false + defer poolMu.Unlock() + poolMu.Lock() + poolPos = randPoolSize +} + +// UUIDs is a slice of UUID types. +type UUIDs []UUID + +// Strings returns a string slice containing the string form of each UUID in uuids. +func (uuids UUIDs) Strings() []string { + var uuidStrs = make([]string, len(uuids)) + for i, uuid := range uuids { + uuidStrs[i] = uuid.String() + } + return uuidStrs +} diff --git a/vendor/github.com/google/uuid/version1.go b/vendor/github.com/google/uuid/version1.go new file mode 100644 index 0000000..4631096 --- /dev/null +++ b/vendor/github.com/google/uuid/version1.go @@ -0,0 +1,44 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" +) + +// NewUUID returns a Version 1 UUID based on the current NodeID and clock +// sequence, and the current time. If the NodeID has not been set by SetNodeID +// or SetNodeInterface then it will be set automatically. If the NodeID cannot +// be set NewUUID returns nil. If clock sequence has not been set by +// SetClockSequence then it will be set automatically. If GetTime fails to +// return the current NewUUID returns nil and an error. +// +// In most cases, New should be used. +func NewUUID() (UUID, error) { + var uuid UUID + now, seq, err := GetTime() + if err != nil { + return uuid, err + } + + timeLow := uint32(now & 0xffffffff) + timeMid := uint16((now >> 32) & 0xffff) + timeHi := uint16((now >> 48) & 0x0fff) + timeHi |= 0x1000 // Version 1 + + binary.BigEndian.PutUint32(uuid[0:], timeLow) + binary.BigEndian.PutUint16(uuid[4:], timeMid) + binary.BigEndian.PutUint16(uuid[6:], timeHi) + binary.BigEndian.PutUint16(uuid[8:], seq) + + nodeMu.Lock() + if nodeID == zeroID { + setNodeInterface("") + } + copy(uuid[10:], nodeID[:]) + nodeMu.Unlock() + + return uuid, nil +} diff --git a/vendor/github.com/google/uuid/version4.go b/vendor/github.com/google/uuid/version4.go new file mode 100644 index 0000000..7697802 --- /dev/null +++ b/vendor/github.com/google/uuid/version4.go @@ -0,0 +1,76 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import "io" + +// New creates a new random UUID or panics. New is equivalent to +// the expression +// +// uuid.Must(uuid.NewRandom()) +func New() UUID { + return Must(NewRandom()) +} + +// NewString creates a new random UUID and returns it as a string or panics. 
+// NewString is equivalent to the expression +// +// uuid.New().String() +func NewString() string { + return Must(NewRandom()).String() +} + +// NewRandom returns a Random (Version 4) UUID. +// +// The strength of the UUIDs is based on the strength of the crypto/rand +// package. +// +// Uses the randomness pool if it was enabled with EnableRandPool. +// +// A note about uniqueness derived from the UUID Wikipedia entry: +// +// Randomly generated UUIDs have 122 random bits. One's annual risk of being +// hit by a meteorite is estimated to be one chance in 17 billion, that +// means the probability is about 0.00000000006 (6 × 10−11), +// equivalent to the odds of creating a few tens of trillions of UUIDs in a +// year and having one duplicate. +func NewRandom() (UUID, error) { + if !poolEnabled { + return NewRandomFromReader(rander) + } + return newRandomFromPool() +} + +// NewRandomFromReader returns a UUID based on bytes read from a given io.Reader. +func NewRandomFromReader(r io.Reader) (UUID, error) { + var uuid UUID + _, err := io.ReadFull(r, uuid[:]) + if err != nil { + return Nil, err + } + uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4 + uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10 + return uuid, nil +} + +func newRandomFromPool() (UUID, error) { + var uuid UUID + poolMu.Lock() + if poolPos == randPoolSize { + _, err := io.ReadFull(rander, pool[:]) + if err != nil { + poolMu.Unlock() + return Nil, err + } + poolPos = 0 + } + copy(uuid[:], pool[poolPos:(poolPos+16)]) + poolPos += 16 + poolMu.Unlock() + + uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4 + uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10 + return uuid, nil +} diff --git a/vendor/github.com/google/uuid/version6.go b/vendor/github.com/google/uuid/version6.go new file mode 100644 index 0000000..339a959 --- /dev/null +++ b/vendor/github.com/google/uuid/version6.go @@ -0,0 +1,56 @@ +// Copyright 2023 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import "encoding/binary" + +// UUID version 6 is a field-compatible version of UUIDv1, reordered for improved DB locality. +// It is expected that UUIDv6 will primarily be used in contexts where there are existing v1 UUIDs. +// Systems that do not involve legacy UUIDv1 SHOULD consider using UUIDv7 instead. +// +// see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#uuidv6 +// +// NewV6 returns a Version 6 UUID based on the current NodeID and clock +// sequence, and the current time. If the NodeID has not been set by SetNodeID +// or SetNodeInterface then it will be set automatically. If the NodeID cannot +// be set NewV6 set NodeID is random bits automatically . If clock sequence has not been set by +// SetClockSequence then it will be set automatically. If GetTime fails to +// return the current NewV6 returns Nil and an error. 
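+//
+// Illustrative (assumed) usage, including recovery of the embedded timestamp:
+//
+//	id, err := uuid.NewV6()
+//	if err == nil {
+//		sec, nsec := id.Time().UnixTime() // time encoded in the UUID
+//		_ = time.Unix(sec, nsec)
+//	}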
+func NewV6() (UUID, error) { + var uuid UUID + now, seq, err := GetTime() + if err != nil { + return uuid, err + } + + /* + 0 1 2 3 + 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | time_high | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | time_mid | time_low_and_version | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + |clk_seq_hi_res | clk_seq_low | node (0-1) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | node (2-5) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + */ + + binary.BigEndian.PutUint64(uuid[0:], uint64(now)) + binary.BigEndian.PutUint16(uuid[8:], seq) + + uuid[6] = 0x60 | (uuid[6] & 0x0F) + uuid[8] = 0x80 | (uuid[8] & 0x3F) + + nodeMu.Lock() + if nodeID == zeroID { + setNodeInterface("") + } + copy(uuid[10:], nodeID[:]) + nodeMu.Unlock() + + return uuid, nil +} diff --git a/vendor/github.com/google/uuid/version7.go b/vendor/github.com/google/uuid/version7.go new file mode 100644 index 0000000..3167b64 --- /dev/null +++ b/vendor/github.com/google/uuid/version7.go @@ -0,0 +1,104 @@ +// Copyright 2023 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "io" +) + +// UUID version 7 features a time-ordered value field derived from the widely +// implemented and well known Unix Epoch timestamp source, +// the number of milliseconds seconds since midnight 1 Jan 1970 UTC, leap seconds excluded. +// As well as improved entropy characteristics over versions 1 or 6. +// +// see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#name-uuid-version-7 +// +// Implementations SHOULD utilize UUID version 7 over UUID version 1 and 6 if possible. +// +// NewV7 returns a Version 7 UUID based on the current time(Unix Epoch). +// Uses the randomness pool if it was enabled with EnableRandPool. +// On error, NewV7 returns Nil and an error +func NewV7() (UUID, error) { + uuid, err := NewRandom() + if err != nil { + return uuid, err + } + makeV7(uuid[:]) + return uuid, nil +} + +// NewV7FromReader returns a Version 7 UUID based on the current time(Unix Epoch). +// it use NewRandomFromReader fill random bits. +// On error, NewV7FromReader returns Nil and an error. 
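Because makeV7 (defined a little further below) writes the 48-bit Unix-millisecond timestamp into bytes 0-5 of the UUID, the creation time can be recovered from a V7 value without any extra API. A minimal sketch, separate from the vendored sources; the manual bit shifting simply mirrors the field diagram documented in makeV7.

```go
package main

import (
	"fmt"
	"time"

	"github.com/google/uuid"
)

func main() {
	id, err := uuid.NewV7()
	if err != nil {
		panic(err)
	}

	// Bytes 0-5 hold unix_ts_ms, big-endian, per the layout in makeV7.
	ms := int64(id[0])<<40 | int64(id[1])<<32 | int64(id[2])<<24 |
		int64(id[3])<<16 | int64(id[4])<<8 | int64(id[5])

	fmt.Println(id)
	fmt.Println("embedded timestamp:", time.UnixMilli(ms).UTC())
}
```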
+func NewV7FromReader(r io.Reader) (UUID, error) { + uuid, err := NewRandomFromReader(r) + if err != nil { + return uuid, err + } + + makeV7(uuid[:]) + return uuid, nil +} + +// makeV7 fill 48 bits time (uuid[0] - uuid[5]), set version b0111 (uuid[6]) +// uuid[8] already has the right version number (Variant is 10) +// see function NewV7 and NewV7FromReader +func makeV7(uuid []byte) { + /* + 0 1 2 3 + 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | unix_ts_ms | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | unix_ts_ms | ver | rand_a (12 bit seq) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + |var| rand_b | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | rand_b | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + */ + _ = uuid[15] // bounds check + + t, s := getV7Time() + + uuid[0] = byte(t >> 40) + uuid[1] = byte(t >> 32) + uuid[2] = byte(t >> 24) + uuid[3] = byte(t >> 16) + uuid[4] = byte(t >> 8) + uuid[5] = byte(t) + + uuid[6] = 0x70 | (0x0F & byte(s>>8)) + uuid[7] = byte(s) +} + +// lastV7time is the last time we returned stored as: +// +// 52 bits of time in milliseconds since epoch +// 12 bits of (fractional nanoseconds) >> 8 +var lastV7time int64 + +const nanoPerMilli = 1000000 + +// getV7Time returns the time in milliseconds and nanoseconds / 256. +// The returned (milli << 12 + seq) is guarenteed to be greater than +// (milli << 12 + seq) returned by any previous call to getV7Time. +func getV7Time() (milli, seq int64) { + timeMu.Lock() + defer timeMu.Unlock() + + nano := timeNow().UnixNano() + milli = nano / nanoPerMilli + // Sequence number is between 0 and 3906 (nanoPerMilli>>8) + seq = (nano - milli*nanoPerMilli) >> 8 + now := milli<<12 + seq + if now <= lastV7time { + now = lastV7time + 1 + milli = now >> 12 + seq = now & 0xfff + } + lastV7time = now + return milli, seq +} diff --git a/vendor/github.com/gorilla/handlers/.editorconfig b/vendor/github.com/gorilla/handlers/.editorconfig new file mode 100644 index 0000000..c6b74c3 --- /dev/null +++ b/vendor/github.com/gorilla/handlers/.editorconfig @@ -0,0 +1,20 @@ +; https://editorconfig.org/ + +root = true + +[*] +insert_final_newline = true +charset = utf-8 +trim_trailing_whitespace = true +indent_style = space +indent_size = 2 + +[{Makefile,go.mod,go.sum,*.go,.gitmodules}] +indent_style = tab +indent_size = 4 + +[*.md] +indent_size = 4 +trim_trailing_whitespace = false + +eclint_indent_style = unset \ No newline at end of file diff --git a/vendor/github.com/gorilla/handlers/.gitignore b/vendor/github.com/gorilla/handlers/.gitignore new file mode 100644 index 0000000..577a89e --- /dev/null +++ b/vendor/github.com/gorilla/handlers/.gitignore @@ -0,0 +1,2 @@ +# Output of the go test coverage tool +coverage.coverprofile diff --git a/vendor/github.com/gorilla/handlers/LICENSE b/vendor/github.com/gorilla/handlers/LICENSE new file mode 100644 index 0000000..bb9d80b --- /dev/null +++ b/vendor/github.com/gorilla/handlers/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2023 The Gorilla Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/gorilla/handlers/Makefile b/vendor/github.com/gorilla/handlers/Makefile new file mode 100644 index 0000000..003b784 --- /dev/null +++ b/vendor/github.com/gorilla/handlers/Makefile @@ -0,0 +1,34 @@ +GO_LINT=$(shell which golangci-lint 2> /dev/null || echo '') +GO_LINT_URI=github.com/golangci/golangci-lint/cmd/golangci-lint@latest + +GO_SEC=$(shell which gosec 2> /dev/null || echo '') +GO_SEC_URI=github.com/securego/gosec/v2/cmd/gosec@latest + +GO_VULNCHECK=$(shell which govulncheck 2> /dev/null || echo '') +GO_VULNCHECK_URI=golang.org/x/vuln/cmd/govulncheck@latest + +.PHONY: verify +verify: sec govulncheck lint test + +.PHONY: lint +lint: + $(if $(GO_LINT), ,go install $(GO_LINT_URI)) + @echo "##### Running golangci-lint #####" + golangci-lint run -v + +.PHONY: sec +sec: + $(if $(GO_SEC), ,go install $(GO_SEC_URI)) + @echo "##### Running gosec #####" + gosec ./... + +.PHONY: govulncheck +govulncheck: + $(if $(GO_VULNCHECK), ,go install $(GO_VULNCHECK_URI)) + @echo "##### Running govulncheck #####" + govulncheck ./... + +.PHONY: test +test: + @echo "##### Running tests #####" + go test -race -cover -coverprofile=coverage.coverprofile -covermode=atomic -v ./... diff --git a/vendor/github.com/gorilla/handlers/README.md b/vendor/github.com/gorilla/handlers/README.md new file mode 100644 index 0000000..02555b2 --- /dev/null +++ b/vendor/github.com/gorilla/handlers/README.md @@ -0,0 +1,56 @@ +# gorilla/handlers + +![Testing](https://github.com/gorilla/handlers/actions/workflows/test.yml/badge.svg) +[![Codecov](https://codecov.io/github/gorilla/handlers/branch/main/graph/badge.svg)](https://codecov.io/github/gorilla/handlers) +[![GoDoc](https://godoc.org/github.com/gorilla/handlers?status.svg)](https://godoc.org/github.com/gorilla/handlers) +[![Sourcegraph](https://sourcegraph.com/github.com/gorilla/handlers/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/handlers?badge) + +Package handlers is a collection of handlers (aka "HTTP middleware") for use +with Go's `net/http` package (or any framework supporting `http.Handler`), including: + +* [**LoggingHandler**](https://godoc.org/github.com/gorilla/handlers#LoggingHandler) for logging HTTP requests in the Apache [Common Log + Format](http://httpd.apache.org/docs/2.2/logs.html#common). 
+* [**CombinedLoggingHandler**](https://godoc.org/github.com/gorilla/handlers#CombinedLoggingHandler) for logging HTTP requests in the Apache [Combined Log + Format](http://httpd.apache.org/docs/2.2/logs.html#combined) commonly used by + both Apache and nginx. +* [**CompressHandler**](https://godoc.org/github.com/gorilla/handlers#CompressHandler) for gzipping responses. +* [**ContentTypeHandler**](https://godoc.org/github.com/gorilla/handlers#ContentTypeHandler) for validating requests against a list of accepted + content types. +* [**MethodHandler**](https://godoc.org/github.com/gorilla/handlers#MethodHandler) for matching HTTP methods against handlers in a + `map[string]http.Handler` +* [**ProxyHeaders**](https://godoc.org/github.com/gorilla/handlers#ProxyHeaders) for populating `r.RemoteAddr` and `r.URL.Scheme` based on the + `X-Forwarded-For`, `X-Real-IP`, `X-Forwarded-Proto` and RFC7239 `Forwarded` + headers when running a Go server behind a HTTP reverse proxy. +* [**CanonicalHost**](https://godoc.org/github.com/gorilla/handlers#CanonicalHost) for re-directing to the preferred host when handling multiple + domains (i.e. multiple CNAME aliases). +* [**RecoveryHandler**](https://godoc.org/github.com/gorilla/handlers#RecoveryHandler) for recovering from unexpected panics. + +Other handlers are documented [on the Gorilla +website](https://www.gorillatoolkit.org/pkg/handlers). + +## Example + +A simple example using `handlers.LoggingHandler` and `handlers.CompressHandler`: + +```go +import ( + "net/http" + "github.com/gorilla/handlers" +) + +func main() { + r := http.NewServeMux() + + // Only log requests to our admin dashboard to stdout + r.Handle("/admin", handlers.LoggingHandler(os.Stdout, http.HandlerFunc(ShowAdminDashboard))) + r.HandleFunc("/", ShowIndex) + + // Wrap our server with our gzip handler to gzip compress all responses. + http.ListenAndServe(":8000", handlers.CompressHandler(r)) +} +``` + +## License + +BSD licensed. See the included LICENSE file for details. + diff --git a/vendor/github.com/gorilla/handlers/canonical.go b/vendor/github.com/gorilla/handlers/canonical.go new file mode 100644 index 0000000..7121f53 --- /dev/null +++ b/vendor/github.com/gorilla/handlers/canonical.go @@ -0,0 +1,73 @@ +package handlers + +import ( + "net/http" + "net/url" + "strings" +) + +type canonical struct { + h http.Handler + domain string + code int +} + +// CanonicalHost is HTTP middleware that re-directs requests to the canonical +// domain. It accepts a domain and a status code (e.g. 301 or 302) and +// re-directs clients to this domain. The existing request path is maintained. +// +// Note: If the provided domain is considered invalid by url.Parse or otherwise +// returns an empty scheme or host, clients are not re-directed. +// +// Example: +// +// r := mux.NewRouter() +// canonical := handlers.CanonicalHost("http://www.gorillatoolkit.org", 302) +// r.HandleFunc("/route", YourHandler) +// +// log.Fatal(http.ListenAndServe(":7000", canonical(r))) +func CanonicalHost(domain string, code int) func(h http.Handler) http.Handler { + fn := func(h http.Handler) http.Handler { + return canonical{h, domain, code} + } + + return fn +} + +func (c canonical) ServeHTTP(w http.ResponseWriter, r *http.Request) { + dest, err := url.Parse(c.domain) + if err != nil { + // Call the next handler if the provided domain fails to parse. + c.h.ServeHTTP(w, r) + return + } + + if dest.Scheme == "" || dest.Host == "" { + // Call the next handler if the scheme or host are empty. 
+ // Note that url.Parse won't fail on in this case. + c.h.ServeHTTP(w, r) + return + } + + if !strings.EqualFold(cleanHost(r.Host), dest.Host) { + // Re-build the destination URL + dest := dest.Scheme + "://" + dest.Host + r.URL.Path + if r.URL.RawQuery != "" { + dest += "?" + r.URL.RawQuery + } + http.Redirect(w, r, dest, c.code) + return + } + + c.h.ServeHTTP(w, r) +} + +// cleanHost cleans invalid Host headers by stripping anything after '/' or ' '. +// This is backported from Go 1.5 (in response to issue #11206) and attempts to +// mitigate malformed Host headers that do not match the format in RFC7230. +func cleanHost(in string) string { + if i := strings.IndexAny(in, " /"); i != -1 { + return in[:i] + } + return in +} diff --git a/vendor/github.com/gorilla/handlers/compress.go b/vendor/github.com/gorilla/handlers/compress.go new file mode 100644 index 0000000..d6f5895 --- /dev/null +++ b/vendor/github.com/gorilla/handlers/compress.go @@ -0,0 +1,143 @@ +// Copyright 2013 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package handlers + +import ( + "compress/flate" + "compress/gzip" + "io" + "net/http" + "strings" + + "github.com/felixge/httpsnoop" +) + +const acceptEncoding string = "Accept-Encoding" + +type compressResponseWriter struct { + compressor io.Writer + w http.ResponseWriter +} + +func (cw *compressResponseWriter) WriteHeader(c int) { + cw.w.Header().Del("Content-Length") + cw.w.WriteHeader(c) +} + +func (cw *compressResponseWriter) Write(b []byte) (int, error) { + h := cw.w.Header() + if h.Get("Content-Type") == "" { + h.Set("Content-Type", http.DetectContentType(b)) + } + h.Del("Content-Length") + + return cw.compressor.Write(b) +} + +func (cw *compressResponseWriter) ReadFrom(r io.Reader) (int64, error) { + return io.Copy(cw.compressor, r) +} + +type flusher interface { + Flush() error +} + +func (cw *compressResponseWriter) Flush() { + // Flush compressed data if compressor supports it. + if f, ok := cw.compressor.(flusher); ok { + _ = f.Flush() + } + // Flush HTTP response. + if f, ok := cw.w.(http.Flusher); ok { + f.Flush() + } +} + +// CompressHandler gzip compresses HTTP responses for clients that support it +// via the 'Accept-Encoding' header. +// +// Compressing TLS traffic may leak the page contents to an attacker if the +// page contains user input: http://security.stackexchange.com/a/102015/12208 +func CompressHandler(h http.Handler) http.Handler { + return CompressHandlerLevel(h, gzip.DefaultCompression) +} + +// CompressHandlerLevel gzip compresses HTTP responses with specified compression level +// for clients that support it via the 'Accept-Encoding' header. +// +// The compression level should be gzip.DefaultCompression, gzip.NoCompression, +// or any integer value between gzip.BestSpeed and gzip.BestCompression inclusive. +// gzip.DefaultCompression is used in case of invalid compression level. 
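As a usage sketch (not part of the vendored file; the port and handler are placeholders), wrapping any `http.Handler` is enough to negotiate gzip or deflate with clients, and an explicit level can be chosen with CompressHandlerLevel:

```go
package main

import (
	"compress/gzip"
	"fmt"
	"log"
	"net/http"

	"github.com/gorilla/handlers"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "hello, compressed world")
	})

	// gzip.BestSpeed favours latency over ratio; an out-of-range level
	// silently falls back to gzip.DefaultCompression.
	compressed := handlers.CompressHandlerLevel(mux, gzip.BestSpeed)

	log.Fatal(http.ListenAndServe(":8000", compressed))
}
```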
+func CompressHandlerLevel(h http.Handler, level int) http.Handler { + if level < gzip.DefaultCompression || level > gzip.BestCompression { + level = gzip.DefaultCompression + } + + const ( + gzipEncoding = "gzip" + flateEncoding = "deflate" + ) + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // detect what encoding to use + var encoding string + for _, curEnc := range strings.Split(r.Header.Get(acceptEncoding), ",") { + curEnc = strings.TrimSpace(curEnc) + if curEnc == gzipEncoding || curEnc == flateEncoding { + encoding = curEnc + break + } + } + + // always add Accept-Encoding to Vary to prevent intermediate caches corruption + w.Header().Add("Vary", acceptEncoding) + + // if we weren't able to identify an encoding we're familiar with, pass on the + // request to the handler and return + if encoding == "" { + h.ServeHTTP(w, r) + return + } + + if r.Header.Get("Upgrade") != "" { + h.ServeHTTP(w, r) + return + } + + // wrap the ResponseWriter with the writer for the chosen encoding + var encWriter io.WriteCloser + if encoding == gzipEncoding { + encWriter, _ = gzip.NewWriterLevel(w, level) + } else if encoding == flateEncoding { + encWriter, _ = flate.NewWriter(w, level) + } + defer encWriter.Close() + + w.Header().Set("Content-Encoding", encoding) + r.Header.Del(acceptEncoding) + + cw := &compressResponseWriter{ + w: w, + compressor: encWriter, + } + + w = httpsnoop.Wrap(w, httpsnoop.Hooks{ + Write: func(httpsnoop.WriteFunc) httpsnoop.WriteFunc { + return cw.Write + }, + WriteHeader: func(httpsnoop.WriteHeaderFunc) httpsnoop.WriteHeaderFunc { + return cw.WriteHeader + }, + Flush: func(httpsnoop.FlushFunc) httpsnoop.FlushFunc { + return cw.Flush + }, + ReadFrom: func(rff httpsnoop.ReadFromFunc) httpsnoop.ReadFromFunc { + return cw.ReadFrom + }, + }) + + h.ServeHTTP(w, r) + }) +} diff --git a/vendor/github.com/gorilla/handlers/cors.go b/vendor/github.com/gorilla/handlers/cors.go new file mode 100644 index 0000000..8af9c09 --- /dev/null +++ b/vendor/github.com/gorilla/handlers/cors.go @@ -0,0 +1,352 @@ +package handlers + +import ( + "net/http" + "strconv" + "strings" +) + +// CORSOption represents a functional option for configuring the CORS middleware. +type CORSOption func(*cors) error + +type cors struct { + h http.Handler + allowedHeaders []string + allowedMethods []string + allowedOrigins []string + allowedOriginValidator OriginValidator + exposedHeaders []string + maxAge int + ignoreOptions bool + allowCredentials bool + optionStatusCode int +} + +// OriginValidator takes an origin string and returns whether or not that origin is allowed. +type OriginValidator func(string) bool + +var ( + defaultCorsOptionStatusCode = http.StatusOK + defaultCorsMethods = []string{http.MethodGet, http.MethodHead, http.MethodPost} + defaultCorsHeaders = []string{"Accept", "Accept-Language", "Content-Language", "Origin"} + // (WebKit/Safari v9 sends the Origin header by default in AJAX requests). 
+) + +const ( + corsOptionMethod string = http.MethodOptions + corsAllowOriginHeader string = "Access-Control-Allow-Origin" + corsExposeHeadersHeader string = "Access-Control-Expose-Headers" + corsMaxAgeHeader string = "Access-Control-Max-Age" + corsAllowMethodsHeader string = "Access-Control-Allow-Methods" + corsAllowHeadersHeader string = "Access-Control-Allow-Headers" + corsAllowCredentialsHeader string = "Access-Control-Allow-Credentials" + corsRequestMethodHeader string = "Access-Control-Request-Method" + corsRequestHeadersHeader string = "Access-Control-Request-Headers" + corsOriginHeader string = "Origin" + corsVaryHeader string = "Vary" + corsOriginMatchAll string = "*" +) + +func (ch *cors) ServeHTTP(w http.ResponseWriter, r *http.Request) { + origin := r.Header.Get(corsOriginHeader) + if !ch.isOriginAllowed(origin) { + if r.Method != corsOptionMethod || ch.ignoreOptions { + ch.h.ServeHTTP(w, r) + } + + return + } + + if r.Method == corsOptionMethod { + if ch.ignoreOptions { + ch.h.ServeHTTP(w, r) + return + } + + if _, ok := r.Header[corsRequestMethodHeader]; !ok { + w.WriteHeader(http.StatusBadRequest) + return + } + + method := r.Header.Get(corsRequestMethodHeader) + if !ch.isMatch(method, ch.allowedMethods) { + w.WriteHeader(http.StatusMethodNotAllowed) + return + } + + requestHeaders := strings.Split(r.Header.Get(corsRequestHeadersHeader), ",") + allowedHeaders := []string{} + for _, v := range requestHeaders { + canonicalHeader := http.CanonicalHeaderKey(strings.TrimSpace(v)) + if canonicalHeader == "" || ch.isMatch(canonicalHeader, defaultCorsHeaders) { + continue + } + + if !ch.isMatch(canonicalHeader, ch.allowedHeaders) { + w.WriteHeader(http.StatusForbidden) + return + } + + allowedHeaders = append(allowedHeaders, canonicalHeader) + } + + if len(allowedHeaders) > 0 { + w.Header().Set(corsAllowHeadersHeader, strings.Join(allowedHeaders, ",")) + } + + if ch.maxAge > 0 { + w.Header().Set(corsMaxAgeHeader, strconv.Itoa(ch.maxAge)) + } + + if !ch.isMatch(method, defaultCorsMethods) { + w.Header().Set(corsAllowMethodsHeader, method) + } + } else if len(ch.exposedHeaders) > 0 { + w.Header().Set(corsExposeHeadersHeader, strings.Join(ch.exposedHeaders, ",")) + } + + if ch.allowCredentials { + w.Header().Set(corsAllowCredentialsHeader, "true") + } + + if len(ch.allowedOrigins) > 1 { + w.Header().Set(corsVaryHeader, corsOriginHeader) + } + + returnOrigin := origin + if ch.allowedOriginValidator == nil && len(ch.allowedOrigins) == 0 { + returnOrigin = "*" + } else { + for _, o := range ch.allowedOrigins { + // A configuration of * is different than explicitly setting an allowed + // origin. Returning arbitrary origin headers in an access control allow + // origin header is unsafe and is not required by any use case. + if o == corsOriginMatchAll { + returnOrigin = "*" + break + } + } + } + w.Header().Set(corsAllowOriginHeader, returnOrigin) + + if r.Method == corsOptionMethod { + w.WriteHeader(ch.optionStatusCode) + return + } + ch.h.ServeHTTP(w, r) +} + +// CORS provides Cross-Origin Resource Sharing middleware. +// Example: +// +// import ( +// "net/http" +// +// "github.com/gorilla/handlers" +// "github.com/gorilla/mux" +// ) +// +// func main() { +// r := mux.NewRouter() +// r.HandleFunc("/users", UserEndpoint) +// r.HandleFunc("/projects", ProjectEndpoint) +// +// // Apply the CORS middleware to our top-level router, with the defaults. 
+// http.ListenAndServe(":8000", handlers.CORS()(r)) +// } +func CORS(opts ...CORSOption) func(http.Handler) http.Handler { + return func(h http.Handler) http.Handler { + ch := parseCORSOptions(opts...) + ch.h = h + return ch + } +} + +func parseCORSOptions(opts ...CORSOption) *cors { + ch := &cors{ + allowedMethods: defaultCorsMethods, + allowedHeaders: defaultCorsHeaders, + allowedOrigins: []string{}, + optionStatusCode: defaultCorsOptionStatusCode, + } + + for _, option := range opts { + _ = option(ch) //TODO: @bharat-rajani, return error to caller if not nil? + } + + return ch +} + +// +// Functional options for configuring CORS. +// + +// AllowedHeaders adds the provided headers to the list of allowed headers in a +// CORS request. +// This is an append operation so the headers Accept, Accept-Language, +// and Content-Language are always allowed. +// Content-Type must be explicitly declared if accepting Content-Types other than +// application/x-www-form-urlencoded, multipart/form-data, or text/plain. +func AllowedHeaders(headers []string) CORSOption { + return func(ch *cors) error { + for _, v := range headers { + normalizedHeader := http.CanonicalHeaderKey(strings.TrimSpace(v)) + if normalizedHeader == "" { + continue + } + + if !ch.isMatch(normalizedHeader, ch.allowedHeaders) { + ch.allowedHeaders = append(ch.allowedHeaders, normalizedHeader) + } + } + + return nil + } +} + +// AllowedMethods can be used to explicitly allow methods in the +// Access-Control-Allow-Methods header. +// This is a replacement operation so you must also +// pass GET, HEAD, and POST if you wish to support those methods. +func AllowedMethods(methods []string) CORSOption { + return func(ch *cors) error { + ch.allowedMethods = []string{} + for _, v := range methods { + normalizedMethod := strings.ToUpper(strings.TrimSpace(v)) + if normalizedMethod == "" { + continue + } + + if !ch.isMatch(normalizedMethod, ch.allowedMethods) { + ch.allowedMethods = append(ch.allowedMethods, normalizedMethod) + } + } + + return nil + } +} + +// AllowedOrigins sets the allowed origins for CORS requests, as used in the +// 'Allow-Access-Control-Origin' HTTP header. +// Note: Passing in a []string{"*"} will allow any domain. +func AllowedOrigins(origins []string) CORSOption { + return func(ch *cors) error { + for _, v := range origins { + if v == corsOriginMatchAll { + ch.allowedOrigins = []string{corsOriginMatchAll} + return nil + } + } + + ch.allowedOrigins = origins + return nil + } +} + +// AllowedOriginValidator sets a function for evaluating allowed origins in CORS requests, represented by the +// 'Allow-Access-Control-Origin' HTTP header. +func AllowedOriginValidator(fn OriginValidator) CORSOption { + return func(ch *cors) error { + ch.allowedOriginValidator = fn + return nil + } +} + +// OptionStatusCode sets a custom status code on the OPTIONS requests. +// Default behaviour sets it to 200 to reflect best practices. This is option is not mandatory +// and can be used if you need a custom status code (i.e 204). +// +// More informations on the spec: +// https://fetch.spec.whatwg.org/#cors-preflight-fetch +func OptionStatusCode(code int) CORSOption { + return func(ch *cors) error { + ch.optionStatusCode = code + return nil + } +} + +// ExposedHeaders can be used to specify headers that are available +// and will not be stripped out by the user-agent. 
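Putting the options above together, a hypothetical server might restrict cross-origin access as in the sketch below. The origin, headers, route and port are illustrative, not the package's documented example; only the exported option functions shown in this file are assumed.

```go
package main

import (
	"log"
	"net/http"

	"github.com/gorilla/handlers"
	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()
	r.HandleFunc("/users", func(w http.ResponseWriter, req *http.Request) {
		w.Write([]byte(`[]`))
	})

	cors := handlers.CORS(
		// Only this origin is echoed back; "*" would allow any domain.
		handlers.AllowedOrigins([]string{"https://app.example.com"}),
		// AllowedMethods replaces the defaults, so list GET/POST explicitly.
		handlers.AllowedMethods([]string{"GET", "POST", "OPTIONS"}),
		// Content-Type must be declared for JSON request bodies.
		handlers.AllowedHeaders([]string{"Content-Type", "Authorization"}),
	)

	log.Fatal(http.ListenAndServe(":8000", cors(r)))
}
```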
+func ExposedHeaders(headers []string) CORSOption { + return func(ch *cors) error { + ch.exposedHeaders = []string{} + for _, v := range headers { + normalizedHeader := http.CanonicalHeaderKey(strings.TrimSpace(v)) + if normalizedHeader == "" { + continue + } + + if !ch.isMatch(normalizedHeader, ch.exposedHeaders) { + ch.exposedHeaders = append(ch.exposedHeaders, normalizedHeader) + } + } + + return nil + } +} + +// MaxAge determines the maximum age (in seconds) between preflight requests. A +// maximum of 10 minutes is allowed. An age above this value will default to 10 +// minutes. +func MaxAge(age int) CORSOption { + return func(ch *cors) error { + // Maximum of 10 minutes. + if age > 600 { + age = 600 + } + + ch.maxAge = age + return nil + } +} + +// IgnoreOptions causes the CORS middleware to ignore OPTIONS requests, instead +// passing them through to the next handler. This is useful when your application +// or framework has a pre-existing mechanism for responding to OPTIONS requests. +func IgnoreOptions() CORSOption { + return func(ch *cors) error { + ch.ignoreOptions = true + return nil + } +} + +// AllowCredentials can be used to specify that the user agent may pass +// authentication details along with the request. +func AllowCredentials() CORSOption { + return func(ch *cors) error { + ch.allowCredentials = true + return nil + } +} + +func (ch *cors) isOriginAllowed(origin string) bool { + if origin == "" { + return false + } + + if ch.allowedOriginValidator != nil { + return ch.allowedOriginValidator(origin) + } + + if len(ch.allowedOrigins) == 0 { + return true + } + + for _, allowedOrigin := range ch.allowedOrigins { + if allowedOrigin == origin || allowedOrigin == corsOriginMatchAll { + return true + } + } + + return false +} + +func (ch *cors) isMatch(needle string, haystack []string) bool { + for _, v := range haystack { + if v == needle { + return true + } + } + + return false +} diff --git a/vendor/github.com/gorilla/handlers/doc.go b/vendor/github.com/gorilla/handlers/doc.go new file mode 100644 index 0000000..944e5a8 --- /dev/null +++ b/vendor/github.com/gorilla/handlers/doc.go @@ -0,0 +1,9 @@ +/* +Package handlers is a collection of handlers (aka "HTTP middleware") for use +with Go's net/http package (or any framework supporting http.Handler). + +The package includes handlers for logging in standardised formats, compressing +HTTP responses, validating content types and other useful tools for manipulating +requests and responses. +*/ +package handlers diff --git a/vendor/github.com/gorilla/handlers/handlers.go b/vendor/github.com/gorilla/handlers/handlers.go new file mode 100644 index 0000000..9b92fce --- /dev/null +++ b/vendor/github.com/gorilla/handlers/handlers.go @@ -0,0 +1,150 @@ +// Copyright 2013 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package handlers + +import ( + "bufio" + "fmt" + "net" + "net/http" + "sort" + "strings" +) + +// MethodHandler is an http.Handler that dispatches to a handler whose key in the +// MethodHandler's map matches the name of the HTTP request's method, eg: GET +// +// If the request's method is OPTIONS and OPTIONS is not a key in the map then +// the handler responds with a status of 200 and sets the Allow header to a +// comma-separated list of available methods. 
+// +// If the request's method doesn't match any of its keys the handler responds +// with a status of HTTP 405 "Method Not Allowed" and sets the Allow header to a +// comma-separated list of available methods. +type MethodHandler map[string]http.Handler + +func (h MethodHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { + if handler, ok := h[req.Method]; ok { + handler.ServeHTTP(w, req) + } else { + allow := []string{} + for k := range h { + allow = append(allow, k) + } + sort.Strings(allow) + w.Header().Set("Allow", strings.Join(allow, ", ")) + if req.Method == http.MethodOptions { + w.WriteHeader(http.StatusOK) + } else { + http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) + } + } +} + +// responseLogger is wrapper of http.ResponseWriter that keeps track of its HTTP +// status code and body size. +type responseLogger struct { + w http.ResponseWriter + status int + size int +} + +func (l *responseLogger) Write(b []byte) (int, error) { + size, err := l.w.Write(b) + l.size += size + return size, err +} + +func (l *responseLogger) WriteHeader(s int) { + l.w.WriteHeader(s) + l.status = s +} + +func (l *responseLogger) Status() int { + return l.status +} + +func (l *responseLogger) Size() int { + return l.size +} + +func (l *responseLogger) Hijack() (net.Conn, *bufio.ReadWriter, error) { + conn, rw, err := l.w.(http.Hijacker).Hijack() + if err == nil && l.status == 0 { + // The status will be StatusSwitchingProtocols if there was no error and + // WriteHeader has not been called yet + l.status = http.StatusSwitchingProtocols + } + return conn, rw, err +} + +// isContentType validates the Content-Type header matches the supplied +// contentType. That is, its type and subtype match. +func isContentType(h http.Header, contentType string) bool { + ct := h.Get("Content-Type") + if i := strings.IndexRune(ct, ';'); i != -1 { + ct = ct[0:i] + } + return ct == contentType +} + +// ContentTypeHandler wraps and returns a http.Handler, validating the request +// content type is compatible with the contentTypes list. It writes a HTTP 415 +// error if that fails. +// +// Only PUT, POST, and PATCH requests are considered. +func ContentTypeHandler(h http.Handler, contentTypes ...string) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if !(r.Method == http.MethodPut || r.Method == http.MethodPost || r.Method == http.MethodPatch) { + h.ServeHTTP(w, r) + return + } + + for _, ct := range contentTypes { + if isContentType(r.Header, ct) { + h.ServeHTTP(w, r) + return + } + } + http.Error(w, fmt.Sprintf("Unsupported content type %q; expected one of %q", + r.Header.Get("Content-Type"), + contentTypes), + http.StatusUnsupportedMediaType) + }) +} + +const ( + // HTTPMethodOverrideHeader is a commonly used + // http header to override a request method. + HTTPMethodOverrideHeader = "X-HTTP-Method-Override" + // HTTPMethodOverrideFormKey is a commonly used + // HTML form key to override a request method. + HTTPMethodOverrideFormKey = "_method" +) + +// HTTPMethodOverrideHandler wraps and returns a http.Handler which checks for +// the X-HTTP-Method-Override header or the _method form key, and overrides (if +// valid) request.Method with its value. +// +// This is especially useful for HTTP clients that don't support many http verbs. +// It isn't secure to override e.g a GET to a POST, so only POST requests are +// considered. Likewise, the override method can only be a "write" method: PUT, +// PATCH or DELETE. 
+// +// Form method takes precedence over header method. +func HTTPMethodOverrideHandler(h http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Method == http.MethodPost { + om := r.FormValue(HTTPMethodOverrideFormKey) + if om == "" { + om = r.Header.Get(HTTPMethodOverrideHeader) + } + if om == http.MethodPut || om == http.MethodPatch || om == http.MethodDelete { + r.Method = om + } + } + h.ServeHTTP(w, r) + }) +} diff --git a/vendor/github.com/gorilla/handlers/logging.go b/vendor/github.com/gorilla/handlers/logging.go new file mode 100644 index 0000000..2badb6f --- /dev/null +++ b/vendor/github.com/gorilla/handlers/logging.go @@ -0,0 +1,246 @@ +// Copyright 2013 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package handlers + +import ( + "io" + "net" + "net/http" + "net/url" + "strconv" + "time" + "unicode/utf8" + + "github.com/felixge/httpsnoop" +) + +// Logging + +// LogFormatterParams is the structure any formatter will be handed when time to log comes. +type LogFormatterParams struct { + Request *http.Request + URL url.URL + TimeStamp time.Time + StatusCode int + Size int +} + +// LogFormatter gives the signature of the formatter function passed to CustomLoggingHandler. +type LogFormatter func(writer io.Writer, params LogFormatterParams) + +// loggingHandler is the http.Handler implementation for LoggingHandlerTo and its +// friends + +type loggingHandler struct { + writer io.Writer + handler http.Handler + formatter LogFormatter +} + +func (h loggingHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { + t := time.Now() + logger, w := makeLogger(w) + url := *req.URL + + h.handler.ServeHTTP(w, req) + if req.MultipartForm != nil { + err := req.MultipartForm.RemoveAll() + if err != nil { + return + } + } + + params := LogFormatterParams{ + Request: req, + URL: url, + TimeStamp: t, + StatusCode: logger.Status(), + Size: logger.Size(), + } + + h.formatter(h.writer, params) +} + +func makeLogger(w http.ResponseWriter) (*responseLogger, http.ResponseWriter) { + logger := &responseLogger{w: w, status: http.StatusOK} + return logger, httpsnoop.Wrap(w, httpsnoop.Hooks{ + Write: func(httpsnoop.WriteFunc) httpsnoop.WriteFunc { + return logger.Write + }, + WriteHeader: func(httpsnoop.WriteHeaderFunc) httpsnoop.WriteHeaderFunc { + return logger.WriteHeader + }, + }) +} + +const lowerhex = "0123456789abcdef" + +func appendQuoted(buf []byte, s string) []byte { + var runeTmp [utf8.UTFMax]byte + for width := 0; len(s) > 0; s = s[width:] { //nolint: wastedassign //TODO: why width starts from 0and reassigned as 1 + r := rune(s[0]) + width = 1 + if r >= utf8.RuneSelf { + r, width = utf8.DecodeRuneInString(s) + } + if width == 1 && r == utf8.RuneError { + buf = append(buf, `\x`...) + buf = append(buf, lowerhex[s[0]>>4]) + buf = append(buf, lowerhex[s[0]&0xF]) + continue + } + if r == rune('"') || r == '\\' { // always backslashed + buf = append(buf, '\\') + buf = append(buf, byte(r)) + continue + } + if strconv.IsPrint(r) { + n := utf8.EncodeRune(runeTmp[:], r) + buf = append(buf, runeTmp[:n]...) + continue + } + switch r { + case '\a': + buf = append(buf, `\a`...) + case '\b': + buf = append(buf, `\b`...) + case '\f': + buf = append(buf, `\f`...) + case '\n': + buf = append(buf, `\n`...) + case '\r': + buf = append(buf, `\r`...) + case '\t': + buf = append(buf, `\t`...) + case '\v': + buf = append(buf, `\v`...) 
+ default: + switch { + case r < ' ': + buf = append(buf, `\x`...) + buf = append(buf, lowerhex[s[0]>>4]) + buf = append(buf, lowerhex[s[0]&0xF]) + case r > utf8.MaxRune: + r = 0xFFFD + fallthrough + case r < 0x10000: + buf = append(buf, `\u`...) + for s := 12; s >= 0; s -= 4 { + buf = append(buf, lowerhex[r>>uint(s)&0xF]) + } + default: + buf = append(buf, `\U`...) + for s := 28; s >= 0; s -= 4 { + buf = append(buf, lowerhex[r>>uint(s)&0xF]) + } + } + } + } + return buf +} + +// buildCommonLogLine builds a log entry for req in Apache Common Log Format. +// ts is the timestamp with which the entry should be logged. +// status and size are used to provide the response HTTP status and size. +func buildCommonLogLine(req *http.Request, url url.URL, ts time.Time, status int, size int) []byte { + username := "-" + if url.User != nil { + if name := url.User.Username(); name != "" { + username = name + } + } + + host, _, err := net.SplitHostPort(req.RemoteAddr) + if err != nil { + host = req.RemoteAddr + } + + uri := req.RequestURI + + // Requests using the CONNECT method over HTTP/2.0 must use + // the authority field (aka r.Host) to identify the target. + // Refer: https://httpwg.github.io/specs/rfc7540.html#CONNECT + if req.ProtoMajor == 2 && req.Method == "CONNECT" { + uri = req.Host + } + if uri == "" { + uri = url.RequestURI() + } + + buf := make([]byte, 0, 3*(len(host)+len(username)+len(req.Method)+len(uri)+len(req.Proto)+50)/2) + buf = append(buf, host...) + buf = append(buf, " - "...) + buf = append(buf, username...) + buf = append(buf, " ["...) + buf = append(buf, ts.Format("02/Jan/2006:15:04:05 -0700")...) + buf = append(buf, `] "`...) + buf = append(buf, req.Method...) + buf = append(buf, " "...) + buf = appendQuoted(buf, uri) + buf = append(buf, " "...) + buf = append(buf, req.Proto...) + buf = append(buf, `" `...) + buf = append(buf, strconv.Itoa(status)...) + buf = append(buf, " "...) + buf = append(buf, strconv.Itoa(size)...) + return buf +} + +// writeLog writes a log entry for req to w in Apache Common Log Format. +// ts is the timestamp with which the entry should be logged. +// status and size are used to provide the response HTTP status and size. +func writeLog(writer io.Writer, params LogFormatterParams) { + buf := buildCommonLogLine(params.Request, params.URL, params.TimeStamp, params.StatusCode, params.Size) + buf = append(buf, '\n') + _, _ = writer.Write(buf) +} + +// writeCombinedLog writes a log entry for req to w in Apache Combined Log Format. +// ts is the timestamp with which the entry should be logged. +// status and size are used to provide the response HTTP status and size. +func writeCombinedLog(writer io.Writer, params LogFormatterParams) { + buf := buildCommonLogLine(params.Request, params.URL, params.TimeStamp, params.StatusCode, params.Size) + buf = append(buf, ` "`...) + buf = appendQuoted(buf, params.Request.Referer()) + buf = append(buf, `" "`...) + buf = appendQuoted(buf, params.Request.UserAgent()) + buf = append(buf, '"', '\n') + _, _ = writer.Write(buf) +} + +// CombinedLoggingHandler return a http.Handler that wraps h and logs requests to out in +// Apache Combined Log Format. +// +// See http://httpd.apache.org/docs/2.2/logs.html#combined for a description of this format. +// +// LoggingHandler always sets the ident field of the log to -. 
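Beyond the stock Common and Combined formats, CustomLoggingHandler (defined a few lines below) accepts any LogFormatter. A small sketch with an invented formatter name and output layout, assuming only the exported types shown in this file:

```go
package main

import (
	"fmt"
	"io"
	"log"
	"net/http"
	"os"

	"github.com/gorilla/handlers"
)

// tinyLog is an invented LogFormatter that prints method, path, status and
// response size for each request.
func tinyLog(w io.Writer, p handlers.LogFormatterParams) {
	fmt.Fprintf(w, "%s %s %d %dB\n",
		p.Request.Method, p.URL.Path, p.StatusCode, p.Size)
}

func main() {
	h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	})

	log.Fatal(http.ListenAndServe(":8000",
		handlers.CustomLoggingHandler(os.Stdout, h, tinyLog)))
}
```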
+func CombinedLoggingHandler(out io.Writer, h http.Handler) http.Handler { + return loggingHandler{out, h, writeCombinedLog} +} + +// LoggingHandler return a http.Handler that wraps h and logs requests to out in +// Apache Common Log Format (CLF). +// +// See http://httpd.apache.org/docs/2.2/logs.html#common for a description of this format. +// +// LoggingHandler always sets the ident field of the log to - +// +// Example: +// +// r := mux.NewRouter() +// r.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { +// w.Write([]byte("This is a catch-all route")) +// }) +// loggedRouter := handlers.LoggingHandler(os.Stdout, r) +// http.ListenAndServe(":1123", loggedRouter) +func LoggingHandler(out io.Writer, h http.Handler) http.Handler { + return loggingHandler{out, h, writeLog} +} + +// CustomLoggingHandler provides a way to supply a custom log formatter +// while taking advantage of the mechanisms in this package. +func CustomLoggingHandler(out io.Writer, h http.Handler, f LogFormatter) http.Handler { + return loggingHandler{out, h, f} +} diff --git a/vendor/github.com/gorilla/handlers/proxy_headers.go b/vendor/github.com/gorilla/handlers/proxy_headers.go new file mode 100644 index 0000000..281d753 --- /dev/null +++ b/vendor/github.com/gorilla/handlers/proxy_headers.go @@ -0,0 +1,120 @@ +package handlers + +import ( + "net/http" + "regexp" + "strings" +) + +var ( + // De-facto standard header keys. + xForwardedFor = http.CanonicalHeaderKey("X-Forwarded-For") + xForwardedHost = http.CanonicalHeaderKey("X-Forwarded-Host") + xForwardedProto = http.CanonicalHeaderKey("X-Forwarded-Proto") + xForwardedScheme = http.CanonicalHeaderKey("X-Forwarded-Scheme") + xRealIP = http.CanonicalHeaderKey("X-Real-IP") +) + +var ( + // RFC7239 defines a new "Forwarded: " header designed to replace the + // existing use of X-Forwarded-* headers. + // e.g. Forwarded: for=192.0.2.60;proto=https;by=203.0.113.43. + forwarded = http.CanonicalHeaderKey("Forwarded") + // Allows for a sub-match of the first value after 'for=' to the next + // comma, semi-colon or space. The match is case-insensitive. + forRegex = regexp.MustCompile(`(?i)(?:for=)([^(;|,| )]+)`) + // Allows for a sub-match for the first instance of scheme (http|https) + // prefixed by 'proto='. The match is case-insensitive. + protoRegex = regexp.MustCompile(`(?i)(?:proto=)(https|http)`) +) + +// ProxyHeaders inspects common reverse proxy headers and sets the corresponding +// fields in the HTTP request struct. These are X-Forwarded-For and X-Real-IP +// for the remote (client) IP address, X-Forwarded-Proto or X-Forwarded-Scheme +// for the scheme (http|https), X-Forwarded-Host for the host and the RFC7239 +// Forwarded header, which may include both client IPs and schemes. +// +// NOTE: This middleware should only be used when behind a reverse +// proxy like nginx, HAProxy or Apache. Reverse proxies that don't (or are +// configured not to) strip these headers from client requests, or where these +// headers are accepted "as is" from a remote client (e.g. when Go is not behind +// a proxy), can manifest as a vulnerability if your application uses these +// headers for validating the 'trustworthiness' of a request. +func ProxyHeaders(h http.Handler) http.Handler { + fn := func(w http.ResponseWriter, r *http.Request) { + // Set the remote IP with the value passed from the proxy. + if fwd := getIP(r); fwd != "" { + r.RemoteAddr = fwd + } + + // Set the scheme (proto) with the value passed from the proxy. 
+ if scheme := getScheme(r); scheme != "" { + r.URL.Scheme = scheme + } + // Set the host with the value passed by the proxy + if r.Header.Get(xForwardedHost) != "" { + r.Host = r.Header.Get(xForwardedHost) + } + // Call the next handler in the chain. + h.ServeHTTP(w, r) + } + + return http.HandlerFunc(fn) +} + +// getIP retrieves the IP from the X-Forwarded-For, X-Real-IP and RFC7239 +// Forwarded headers (in that order). +func getIP(r *http.Request) string { + var addr string + + switch { + case r.Header.Get(xForwardedFor) != "": + fwd := r.Header.Get(xForwardedFor) + // Only grab the first (client) address. Note that '192.168.0.1, + // 10.1.1.1' is a valid key for X-Forwarded-For where addresses after + // the first may represent forwarding proxies earlier in the chain. + s := strings.Index(fwd, ", ") + if s == -1 { + s = len(fwd) + } + addr = fwd[:s] + case r.Header.Get(xRealIP) != "": + addr = r.Header.Get(xRealIP) + case r.Header.Get(forwarded) != "": + // match should contain at least two elements if the protocol was + // specified in the Forwarded header. The first element will always be + // the 'for=' capture, which we ignore. In the case of multiple IP + // addresses (for=8.8.8.8, 8.8.4.4,172.16.1.20 is valid) we only + // extract the first, which should be the client IP. + if match := forRegex.FindStringSubmatch(r.Header.Get(forwarded)); len(match) > 1 { + // IPv6 addresses in Forwarded headers are quoted-strings. We strip + // these quotes. + addr = strings.Trim(match[1], `"`) + } + } + + return addr +} + +// getScheme retrieves the scheme from the X-Forwarded-Proto and RFC7239 +// Forwarded headers (in that order). +func getScheme(r *http.Request) string { + var scheme string + + // Retrieve the scheme from X-Forwarded-Proto. + if proto := r.Header.Get(xForwardedProto); proto != "" { + scheme = strings.ToLower(proto) + } else if proto = r.Header.Get(xForwardedScheme); proto != "" { + scheme = strings.ToLower(proto) + } else if proto = r.Header.Get(forwarded); proto != "" { + // match should contain at least two elements if the protocol was + // specified in the Forwarded header. The first element will always be + // the 'proto=' capture, which we ignore. In the case of multiple proto + // parameters (invalid) we only extract the first. + if match := protoRegex.FindStringSubmatch(proto); len(match) > 1 { + scheme = strings.ToLower(match[1]) + } + } + + return scheme +} diff --git a/vendor/github.com/gorilla/handlers/recovery.go b/vendor/github.com/gorilla/handlers/recovery.go new file mode 100644 index 0000000..0d4f955 --- /dev/null +++ b/vendor/github.com/gorilla/handlers/recovery.go @@ -0,0 +1,98 @@ +package handlers + +import ( + "log" + "net/http" + "runtime/debug" +) + +// RecoveryHandlerLogger is an interface used by the recovering handler to print logs. +type RecoveryHandlerLogger interface { + Println(...interface{}) +} + +type recoveryHandler struct { + handler http.Handler + logger RecoveryHandlerLogger + printStack bool +} + +// RecoveryOption provides a functional approach to define +// configuration for a handler; such as setting the logging +// whether or not to print stack traces on panic. +type RecoveryOption func(http.Handler) + +func parseRecoveryOptions(h http.Handler, opts ...RecoveryOption) http.Handler { + for _, option := range opts { + option(h) + } + + return h +} + +// RecoveryHandler is HTTP middleware that recovers from a panic, +// logs the panic, writes http.StatusInternalServerError, and +// continues to the next handler. 
+// +// Example: +// +// r := mux.NewRouter() +// r.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { +// panic("Unexpected error!") +// }) +// +// http.ListenAndServe(":1123", handlers.RecoveryHandler()(r)) +func RecoveryHandler(opts ...RecoveryOption) func(h http.Handler) http.Handler { + return func(h http.Handler) http.Handler { + r := &recoveryHandler{handler: h} + return parseRecoveryOptions(r, opts...) + } +} + +// RecoveryLogger is a functional option to override +// the default logger. +func RecoveryLogger(logger RecoveryHandlerLogger) RecoveryOption { + return func(h http.Handler) { + r := h.(*recoveryHandler) //nolint:errcheck //TODO: + // @bharat-rajani should return type-assertion error but would break the API? + r.logger = logger + } +} + +// PrintRecoveryStack is a functional option to enable +// or disable printing stack traces on panic. +func PrintRecoveryStack(shouldPrint bool) RecoveryOption { + return func(h http.Handler) { + r := h.(*recoveryHandler) //nolint:errcheck //TODO: + // @bharat-rajani should return type-assertion error but would break the API? + r.printStack = shouldPrint + } +} + +func (h recoveryHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { + defer func() { + if err := recover(); err != nil { + w.WriteHeader(http.StatusInternalServerError) + h.log(err) + } + }() + + h.handler.ServeHTTP(w, req) +} + +func (h recoveryHandler) log(v ...interface{}) { + if h.logger != nil { + h.logger.Println(v...) + } else { + log.Println(v...) + } + + if h.printStack { + stack := string(debug.Stack()) + if h.logger != nil { + h.logger.Println(stack) + } else { + log.Println(stack) + } + } +} diff --git a/vendor/github.com/gorilla/mux/.editorconfig b/vendor/github.com/gorilla/mux/.editorconfig new file mode 100644 index 0000000..c6b74c3 --- /dev/null +++ b/vendor/github.com/gorilla/mux/.editorconfig @@ -0,0 +1,20 @@ +; https://editorconfig.org/ + +root = true + +[*] +insert_final_newline = true +charset = utf-8 +trim_trailing_whitespace = true +indent_style = space +indent_size = 2 + +[{Makefile,go.mod,go.sum,*.go,.gitmodules}] +indent_style = tab +indent_size = 4 + +[*.md] +indent_size = 4 +trim_trailing_whitespace = false + +eclint_indent_style = unset \ No newline at end of file diff --git a/vendor/github.com/gorilla/mux/.gitignore b/vendor/github.com/gorilla/mux/.gitignore new file mode 100644 index 0000000..84039fe --- /dev/null +++ b/vendor/github.com/gorilla/mux/.gitignore @@ -0,0 +1 @@ +coverage.coverprofile diff --git a/vendor/github.com/gorilla/mux/LICENSE b/vendor/github.com/gorilla/mux/LICENSE new file mode 100644 index 0000000..bb9d80b --- /dev/null +++ b/vendor/github.com/gorilla/mux/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2023 The Gorilla Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/gorilla/mux/Makefile b/vendor/github.com/gorilla/mux/Makefile new file mode 100644 index 0000000..98f5ab7 --- /dev/null +++ b/vendor/github.com/gorilla/mux/Makefile @@ -0,0 +1,34 @@ +GO_LINT=$(shell which golangci-lint 2> /dev/null || echo '') +GO_LINT_URI=github.com/golangci/golangci-lint/cmd/golangci-lint@latest + +GO_SEC=$(shell which gosec 2> /dev/null || echo '') +GO_SEC_URI=github.com/securego/gosec/v2/cmd/gosec@latest + +GO_VULNCHECK=$(shell which govulncheck 2> /dev/null || echo '') +GO_VULNCHECK_URI=golang.org/x/vuln/cmd/govulncheck@latest + +.PHONY: golangci-lint +golangci-lint: + $(if $(GO_LINT), ,go install $(GO_LINT_URI)) + @echo "##### Running golangci-lint" + golangci-lint run -v + +.PHONY: gosec +gosec: + $(if $(GO_SEC), ,go install $(GO_SEC_URI)) + @echo "##### Running gosec" + gosec ./... + +.PHONY: govulncheck +govulncheck: + $(if $(GO_VULNCHECK), ,go install $(GO_VULNCHECK_URI)) + @echo "##### Running govulncheck" + govulncheck ./... + +.PHONY: verify +verify: golangci-lint gosec govulncheck + +.PHONY: test +test: + @echo "##### Running tests" + go test -race -cover -coverprofile=coverage.coverprofile -covermode=atomic -v ./... \ No newline at end of file diff --git a/vendor/github.com/gorilla/mux/README.md b/vendor/github.com/gorilla/mux/README.md new file mode 100644 index 0000000..382513d --- /dev/null +++ b/vendor/github.com/gorilla/mux/README.md @@ -0,0 +1,812 @@ +# gorilla/mux + +![testing](https://github.com/gorilla/mux/actions/workflows/test.yml/badge.svg) +[![codecov](https://codecov.io/github/gorilla/mux/branch/main/graph/badge.svg)](https://codecov.io/github/gorilla/mux) +[![godoc](https://godoc.org/github.com/gorilla/mux?status.svg)](https://godoc.org/github.com/gorilla/mux) +[![sourcegraph](https://sourcegraph.com/github.com/gorilla/mux/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/mux?badge) + + +![Gorilla Logo](https://github.com/gorilla/.github/assets/53367916/d92caabf-98e0-473e-bfbf-ab554ba435e5) + +Package `gorilla/mux` implements a request router and dispatcher for matching incoming requests to +their respective handler. + +The name mux stands for "HTTP request multiplexer". Like the standard `http.ServeMux`, `mux.Router` matches incoming requests against a list of registered routes and calls a handler for the route that matches the URL or other conditions. The main features are: + +* It implements the `http.Handler` interface so it is compatible with the standard `http.ServeMux`. +* Requests can be matched based on URL host, path, path prefix, schemes, header and query values, HTTP methods or using custom matchers. +* URL hosts, paths and query values can have variables with an optional regular expression. 
+* Registered URLs can be built, or "reversed", which helps maintaining references to resources. +* Routes can be used as subrouters: nested routes are only tested if the parent route matches. This is useful to define groups of routes that share common conditions like a host, a path prefix or other repeated attributes. As a bonus, this optimizes request matching. + +--- + +* [Install](#install) +* [Examples](#examples) +* [Matching Routes](#matching-routes) +* [Static Files](#static-files) +* [Serving Single Page Applications](#serving-single-page-applications) (e.g. React, Vue, Ember.js, etc.) +* [Registered URLs](#registered-urls) +* [Walking Routes](#walking-routes) +* [Graceful Shutdown](#graceful-shutdown) +* [Middleware](#middleware) +* [Handling CORS Requests](#handling-cors-requests) +* [Testing Handlers](#testing-handlers) +* [Full Example](#full-example) + +--- + +## Install + +With a [correctly configured](https://golang.org/doc/install#testing) Go toolchain: + +```sh +go get -u github.com/gorilla/mux +``` + +## Examples + +Let's start registering a couple of URL paths and handlers: + +```go +func main() { + r := mux.NewRouter() + r.HandleFunc("/", HomeHandler) + r.HandleFunc("/products", ProductsHandler) + r.HandleFunc("/articles", ArticlesHandler) + http.Handle("/", r) +} +``` + +Here we register three routes mapping URL paths to handlers. This is equivalent to how `http.HandleFunc()` works: if an incoming request URL matches one of the paths, the corresponding handler is called passing (`http.ResponseWriter`, `*http.Request`) as parameters. + +Paths can have variables. They are defined using the format `{name}` or `{name:pattern}`. If a regular expression pattern is not defined, the matched variable will be anything until the next slash. For example: + +```go +r := mux.NewRouter() +r.HandleFunc("/products/{key}", ProductHandler) +r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler) +r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler) +``` + +The names are used to create a map of route variables which can be retrieved calling `mux.Vars()`: + +```go +func ArticlesCategoryHandler(w http.ResponseWriter, r *http.Request) { + vars := mux.Vars(r) + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, "Category: %v\n", vars["category"]) +} +``` + +And this is all you need to know about the basic usage. More advanced options are explained below. + +### Matching Routes + +Routes can also be restricted to a domain or subdomain. Just define a host pattern to be matched. They can also have variables: + +```go +r := mux.NewRouter() +// Only matches if domain is "www.example.com". +r.Host("www.example.com") +// Matches a dynamic subdomain. +r.Host("{subdomain:[a-z]+}.example.com") +``` + +There are several other matchers that can be added. To match path prefixes: + +```go +r.PathPrefix("/products/") +``` + +...or HTTP methods: + +```go +r.Methods("GET", "POST") +``` + +...or URL schemes: + +```go +r.Schemes("https") +``` + +...or header values: + +```go +r.Headers("X-Requested-With", "XMLHttpRequest") +``` + +...or query values: + +```go +r.Queries("key", "value") +``` + +...or to use a custom matcher function: + +```go +r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool { + return r.ProtoMajor == 0 +}) +``` + +...and finally, it is possible to combine several matchers in a single route: + +```go +r.HandleFunc("/products", ProductsHandler). + Host("www.example.com"). + Methods("GET"). 
+ Schemes("http") +``` + +Routes are tested in the order they were added to the router. If two routes match, the first one wins: + +```go +r := mux.NewRouter() +r.HandleFunc("/specific", specificHandler) +r.PathPrefix("/").Handler(catchAllHandler) +``` + +Setting the same matching conditions again and again can be boring, so we have a way to group several routes that share the same requirements. We call it "subrouting". + +For example, let's say we have several URLs that should only match when the host is `www.example.com`. Create a route for that host and get a "subrouter" from it: + +```go +r := mux.NewRouter() +s := r.Host("www.example.com").Subrouter() +``` + +Then register routes in the subrouter: + +```go +s.HandleFunc("/products/", ProductsHandler) +s.HandleFunc("/products/{key}", ProductHandler) +s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler) +``` + +The three URL paths we registered above will only be tested if the domain is `www.example.com`, because the subrouter is tested first. This is not only convenient, but also optimizes request matching. You can create subrouters combining any attribute matchers accepted by a route. + +Subrouters can be used to create domain or path "namespaces": you define subrouters in a central place and then parts of the app can register its paths relatively to a given subrouter. + +There's one more thing about subroutes. When a subrouter has a path prefix, the inner routes use it as base for their paths: + +```go +r := mux.NewRouter() +s := r.PathPrefix("/products").Subrouter() +// "/products/" +s.HandleFunc("/", ProductsHandler) +// "/products/{key}/" +s.HandleFunc("/{key}/", ProductHandler) +// "/products/{key}/details" +s.HandleFunc("/{key}/details", ProductDetailsHandler) +``` + + +### Static Files + +Note that the path provided to `PathPrefix()` represents a "wildcard": calling +`PathPrefix("/static/").Handler(...)` means that the handler will be passed any +request that matches "/static/\*". This makes it easy to serve static files with mux: + +```go +func main() { + var dir string + + flag.StringVar(&dir, "dir", ".", "the directory to serve files from. Defaults to the current dir") + flag.Parse() + r := mux.NewRouter() + + // This will serve files under http://localhost:8000/static/ + r.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(http.Dir(dir)))) + + srv := &http.Server{ + Handler: r, + Addr: "127.0.0.1:8000", + // Good practice: enforce timeouts for servers you create! + WriteTimeout: 15 * time.Second, + ReadTimeout: 15 * time.Second, + } + + log.Fatal(srv.ListenAndServe()) +} +``` + +### Serving Single Page Applications + +Most of the time it makes sense to serve your SPA on a separate web server from your API, +but sometimes it's desirable to serve them both from one place. It's possible to write a simple +handler for serving your SPA (for use with React Router's [BrowserRouter](https://reacttraining.com/react-router/web/api/BrowserRouter) for example), and leverage +mux's powerful routing for your API endpoints. + +```go +package main + +import ( + "encoding/json" + "log" + "net/http" + "os" + "path/filepath" + "time" + + "github.com/gorilla/mux" +) + +// spaHandler implements the http.Handler interface, so we can use it +// to respond to HTTP requests. The path to the static directory and +// path to the index file within that static directory are used to +// serve the SPA in the given static directory. 
+type spaHandler struct { + staticPath string + indexPath string +} + +// ServeHTTP inspects the URL path to locate a file within the static dir +// on the SPA handler. If a file is found, it will be served. If not, the +// file located at the index path on the SPA handler will be served. This +// is suitable behavior for serving an SPA (single page application). +func (h spaHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + // Join internally call path.Clean to prevent directory traversal + path := filepath.Join(h.staticPath, r.URL.Path) + + // check whether a file exists or is a directory at the given path + fi, err := os.Stat(path) + if os.IsNotExist(err) || fi.IsDir() { + // file does not exist or path is a directory, serve index.html + http.ServeFile(w, r, filepath.Join(h.staticPath, h.indexPath)) + return + } + + if err != nil { + // if we got an error (that wasn't that the file doesn't exist) stating the + // file, return a 500 internal server error and stop + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + // otherwise, use http.FileServer to serve the static file + http.FileServer(http.Dir(h.staticPath)).ServeHTTP(w, r) +} + +func main() { + router := mux.NewRouter() + + router.HandleFunc("/api/health", func(w http.ResponseWriter, r *http.Request) { + // an example API handler + json.NewEncoder(w).Encode(map[string]bool{"ok": true}) + }) + + spa := spaHandler{staticPath: "build", indexPath: "index.html"} + router.PathPrefix("/").Handler(spa) + + srv := &http.Server{ + Handler: router, + Addr: "127.0.0.1:8000", + // Good practice: enforce timeouts for servers you create! + WriteTimeout: 15 * time.Second, + ReadTimeout: 15 * time.Second, + } + + log.Fatal(srv.ListenAndServe()) +} +``` + +### Registered URLs + +Now let's see how to build registered URLs. + +Routes can be named. All routes that define a name can have their URLs built, or "reversed". We define a name calling `Name()` on a route. For example: + +```go +r := mux.NewRouter() +r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). + Name("article") +``` + +To build a URL, get the route and call the `URL()` method, passing a sequence of key/value pairs for the route variables. For the previous route, we would do: + +```go +url, err := r.Get("article").URL("category", "technology", "id", "42") +``` + +...and the result will be a `url.URL` with the following path: + +``` +"/articles/technology/42" +``` + +This also works for host and query value variables: + +```go +r := mux.NewRouter() +r.Host("{subdomain}.example.com"). + Path("/articles/{category}/{id:[0-9]+}"). + Queries("filter", "{filter}"). + HandlerFunc(ArticleHandler). + Name("article") + +// url.String() will be "http://news.example.com/articles/technology/42?filter=gorilla" +url, err := r.Get("article").URL("subdomain", "news", + "category", "technology", + "id", "42", + "filter", "gorilla") +``` + +All variables defined in the route are required, and their values must conform to the corresponding patterns. These requirements guarantee that a generated URL will always match a registered route -- the only exception is for explicitly defined "build-only" routes which never match. + +Regex support also exists for matching Headers within a route. 
For example, we could do: + +```go +r.HeadersRegexp("Content-Type", "application/(text|json)") +``` + +...and the route will match both requests with a Content-Type of `application/json` as well as `application/text` + +There's also a way to build only the URL host or path for a route: use the methods `URLHost()` or `URLPath()` instead. For the previous route, we would do: + +```go +// "http://news.example.com/" +host, err := r.Get("article").URLHost("subdomain", "news") + +// "/articles/technology/42" +path, err := r.Get("article").URLPath("category", "technology", "id", "42") +``` + +And if you use subrouters, host and path defined separately can be built as well: + +```go +r := mux.NewRouter() +s := r.Host("{subdomain}.example.com").Subrouter() +s.Path("/articles/{category}/{id:[0-9]+}"). + HandlerFunc(ArticleHandler). + Name("article") + +// "http://news.example.com/articles/technology/42" +url, err := r.Get("article").URL("subdomain", "news", + "category", "technology", + "id", "42") +``` + +To find all the required variables for a given route when calling `URL()`, the method `GetVarNames()` is available: +```go +r := mux.NewRouter() +r.Host("{domain}"). + Path("/{group}/{item_id}"). + Queries("some_data1", "{some_data1}"). + Queries("some_data2", "{some_data2}"). + Name("article") + +// Will print [domain group item_id some_data1 some_data2] +fmt.Println(r.Get("article").GetVarNames()) + +``` +### Walking Routes + +The `Walk` function on `mux.Router` can be used to visit all of the routes that are registered on a router. For example, +the following prints all of the registered routes: + +```go +package main + +import ( + "fmt" + "net/http" + "strings" + + "github.com/gorilla/mux" +) + +func handler(w http.ResponseWriter, r *http.Request) { + return +} + +func main() { + r := mux.NewRouter() + r.HandleFunc("/", handler) + r.HandleFunc("/products", handler).Methods("POST") + r.HandleFunc("/articles", handler).Methods("GET") + r.HandleFunc("/articles/{id}", handler).Methods("GET", "PUT") + r.HandleFunc("/authors", handler).Queries("surname", "{surname}") + err := r.Walk(func(route *mux.Route, router *mux.Router, ancestors []*mux.Route) error { + pathTemplate, err := route.GetPathTemplate() + if err == nil { + fmt.Println("ROUTE:", pathTemplate) + } + pathRegexp, err := route.GetPathRegexp() + if err == nil { + fmt.Println("Path regexp:", pathRegexp) + } + queriesTemplates, err := route.GetQueriesTemplates() + if err == nil { + fmt.Println("Queries templates:", strings.Join(queriesTemplates, ",")) + } + queriesRegexps, err := route.GetQueriesRegexp() + if err == nil { + fmt.Println("Queries regexps:", strings.Join(queriesRegexps, ",")) + } + methods, err := route.GetMethods() + if err == nil { + fmt.Println("Methods:", strings.Join(methods, ",")) + } + fmt.Println() + return nil + }) + + if err != nil { + fmt.Println(err) + } + + http.Handle("/", r) +} +``` + +### Graceful Shutdown + +Go 1.8 introduced the ability to [gracefully shutdown](https://golang.org/doc/go1.8#http_shutdown) a `*http.Server`. Here's how to do that alongside `mux`: + +```go +package main + +import ( + "context" + "flag" + "log" + "net/http" + "os" + "os/signal" + "time" + + "github.com/gorilla/mux" +) + +func main() { + var wait time.Duration + flag.DurationVar(&wait, "graceful-timeout", time.Second * 15, "the duration for which the server gracefully wait for existing connections to finish - e.g. 
15s or 1m") + flag.Parse() + + r := mux.NewRouter() + // Add your routes as needed + + srv := &http.Server{ + Addr: "0.0.0.0:8080", + // Good practice to set timeouts to avoid Slowloris attacks. + WriteTimeout: time.Second * 15, + ReadTimeout: time.Second * 15, + IdleTimeout: time.Second * 60, + Handler: r, // Pass our instance of gorilla/mux in. + } + + // Run our server in a goroutine so that it doesn't block. + go func() { + if err := srv.ListenAndServe(); err != nil { + log.Println(err) + } + }() + + c := make(chan os.Signal, 1) + // We'll accept graceful shutdowns when quit via SIGINT (Ctrl+C) + // SIGKILL, SIGQUIT or SIGTERM (Ctrl+/) will not be caught. + signal.Notify(c, os.Interrupt) + + // Block until we receive our signal. + <-c + + // Create a deadline to wait for. + ctx, cancel := context.WithTimeout(context.Background(), wait) + defer cancel() + // Doesn't block if no connections, but will otherwise wait + // until the timeout deadline. + srv.Shutdown(ctx) + // Optionally, you could run srv.Shutdown in a goroutine and block on + // <-ctx.Done() if your application should wait for other services + // to finalize based on context cancellation. + log.Println("shutting down") + os.Exit(0) +} +``` + +### Middleware + +Mux supports the addition of middlewares to a [Router](https://godoc.org/github.com/gorilla/mux#Router), which are executed in the order they are added if a match is found, including its subrouters. +Middlewares are (typically) small pieces of code which take one request, do something with it, and pass it down to another middleware or the final handler. Some common use cases for middleware are request logging, header manipulation, or `ResponseWriter` hijacking. + +Mux middlewares are defined using the de facto standard type: + +```go +type MiddlewareFunc func(http.Handler) http.Handler +``` + +Typically, the returned handler is a closure which does something with the http.ResponseWriter and http.Request passed to it, and then calls the handler passed as parameter to the MiddlewareFunc. This takes advantage of closures being able access variables from the context where they are created, while retaining the signature enforced by the receivers. + +A very basic middleware which logs the URI of the request being handled could be written as: + +```go +func loggingMiddleware(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Do stuff here + log.Println(r.RequestURI) + // Call the next handler, which can be another middleware in the chain, or the final handler. 
+ next.ServeHTTP(w, r) + }) +} +``` + +Middlewares can be added to a router using `Router.Use()`: + +```go +r := mux.NewRouter() +r.HandleFunc("/", handler) +r.Use(loggingMiddleware) +``` + +A more complex authentication middleware, which maps session token to users, could be written as: + +```go +// Define our struct +type authenticationMiddleware struct { + tokenUsers map[string]string +} + +// Initialize it somewhere +func (amw *authenticationMiddleware) Populate() { + amw.tokenUsers["00000000"] = "user0" + amw.tokenUsers["aaaaaaaa"] = "userA" + amw.tokenUsers["05f717e5"] = "randomUser" + amw.tokenUsers["deadbeef"] = "user0" +} + +// Middleware function, which will be called for each request +func (amw *authenticationMiddleware) Middleware(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + token := r.Header.Get("X-Session-Token") + + if user, found := amw.tokenUsers[token]; found { + // We found the token in our map + log.Printf("Authenticated user %s\n", user) + // Pass down the request to the next middleware (or final handler) + next.ServeHTTP(w, r) + } else { + // Write an error and stop the handler chain + http.Error(w, "Forbidden", http.StatusForbidden) + } + }) +} +``` + +```go +r := mux.NewRouter() +r.HandleFunc("/", handler) + +amw := authenticationMiddleware{tokenUsers: make(map[string]string)} +amw.Populate() + +r.Use(amw.Middleware) +``` + +Note: The handler chain will be stopped if your middleware doesn't call `next.ServeHTTP()` with the corresponding parameters. This can be used to abort a request if the middleware writer wants to. Middlewares _should_ write to `ResponseWriter` if they _are_ going to terminate the request, and they _should not_ write to `ResponseWriter` if they _are not_ going to terminate it. + +### Handling CORS Requests + +[CORSMethodMiddleware](https://godoc.org/github.com/gorilla/mux#CORSMethodMiddleware) intends to make it easier to strictly set the `Access-Control-Allow-Methods` response header. + +* You will still need to use your own CORS handler to set the other CORS headers such as `Access-Control-Allow-Origin` +* The middleware will set the `Access-Control-Allow-Methods` header to all the method matchers (e.g. `r.Methods(http.MethodGet, http.MethodPut, http.MethodOptions)` -> `Access-Control-Allow-Methods: GET,PUT,OPTIONS`) on a route +* If you do not specify any methods, then: +> _Important_: there must be an `OPTIONS` method matcher for the middleware to set the headers. + +Here is an example of using `CORSMethodMiddleware` along with a custom `OPTIONS` handler to set all the required CORS headers: + +```go +package main + +import ( + "net/http" + "github.com/gorilla/mux" +) + +func main() { + r := mux.NewRouter() + + // IMPORTANT: you must specify an OPTIONS method matcher for the middleware to set CORS headers + r.HandleFunc("/foo", fooHandler).Methods(http.MethodGet, http.MethodPut, http.MethodPatch, http.MethodOptions) + r.Use(mux.CORSMethodMiddleware(r)) + + http.ListenAndServe(":8080", r) +} + +func fooHandler(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Access-Control-Allow-Origin", "*") + if r.Method == http.MethodOptions { + return + } + + w.Write([]byte("foo")) +} +``` + +And an request to `/foo` using something like: + +```bash +curl localhost:8080/foo -v +``` + +Would look like: + +```bash +* Trying ::1... 
+* TCP_NODELAY set +* Connected to localhost (::1) port 8080 (#0) +> GET /foo HTTP/1.1 +> Host: localhost:8080 +> User-Agent: curl/7.59.0 +> Accept: */* +> +< HTTP/1.1 200 OK +< Access-Control-Allow-Methods: GET,PUT,PATCH,OPTIONS +< Access-Control-Allow-Origin: * +< Date: Fri, 28 Jun 2019 20:13:30 GMT +< Content-Length: 3 +< Content-Type: text/plain; charset=utf-8 +< +* Connection #0 to host localhost left intact +foo +``` + +### Testing Handlers + +Testing handlers in a Go web application is straightforward, and _mux_ doesn't complicate this any further. Given two files: `endpoints.go` and `endpoints_test.go`, here's how we'd test an application using _mux_. + +First, our simple HTTP handler: + +```go +// endpoints.go +package main + +func HealthCheckHandler(w http.ResponseWriter, r *http.Request) { + // A very simple health check. + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + // In the future we could report back on the status of our DB, or our cache + // (e.g. Redis) by performing a simple PING, and include them in the response. + io.WriteString(w, `{"alive": true}`) +} + +func main() { + r := mux.NewRouter() + r.HandleFunc("/health", HealthCheckHandler) + + log.Fatal(http.ListenAndServe("localhost:8080", r)) +} +``` + +Our test code: + +```go +// endpoints_test.go +package main + +import ( + "net/http" + "net/http/httptest" + "testing" +) + +func TestHealthCheckHandler(t *testing.T) { + // Create a request to pass to our handler. We don't have any query parameters for now, so we'll + // pass 'nil' as the third parameter. + req, err := http.NewRequest("GET", "/health", nil) + if err != nil { + t.Fatal(err) + } + + // We create a ResponseRecorder (which satisfies http.ResponseWriter) to record the response. + rr := httptest.NewRecorder() + handler := http.HandlerFunc(HealthCheckHandler) + + // Our handlers satisfy http.Handler, so we can call their ServeHTTP method + // directly and pass in our Request and ResponseRecorder. + handler.ServeHTTP(rr, req) + + // Check the status code is what we expect. + if status := rr.Code; status != http.StatusOK { + t.Errorf("handler returned wrong status code: got %v want %v", + status, http.StatusOK) + } + + // Check the response body is what we expect. + expected := `{"alive": true}` + if rr.Body.String() != expected { + t.Errorf("handler returned unexpected body: got %v want %v", + rr.Body.String(), expected) + } +} +``` + +In the case that our routes have [variables](#examples), we can pass those in the request. We could write +[table-driven tests](https://dave.cheney.net/2013/06/09/writing-table-driven-tests-in-go) to test multiple +possible route variables as needed. + +```go +// endpoints.go +func main() { + r := mux.NewRouter() + // A route with a route variable: + r.HandleFunc("/metrics/{type}", MetricsHandler) + + log.Fatal(http.ListenAndServe("localhost:8080", r)) +} +``` + +Our test file, with a table-driven test of `routeVariables`: + +```go +// endpoints_test.go +func TestMetricsHandler(t *testing.T) { + tt := []struct{ + routeVariable string + shouldPass bool + }{ + {"goroutines", true}, + {"heap", true}, + {"counters", true}, + {"queries", true}, + {"adhadaeqm3k", false}, + } + + for _, tc := range tt { + path := fmt.Sprintf("/metrics/%s", tc.routeVariable) + req, err := http.NewRequest("GET", path, nil) + if err != nil { + t.Fatal(err) + } + + rr := httptest.NewRecorder() + + // To add the vars to the context, + // we need to create a router through which we can pass the request. 
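+ // (As an alternative approach, the mux.SetURLVars test helper can attach
+ // route variables directly to a request, so a handler could be exercised
+ // without constructing a router; the router-based approach is used below.)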
+ router := mux.NewRouter() + router.HandleFunc("/metrics/{type}", MetricsHandler) + router.ServeHTTP(rr, req) + + // In this case, our MetricsHandler returns a non-200 response + // for a route variable it doesn't know about. + if rr.Code == http.StatusOK && !tc.shouldPass { + t.Errorf("handler should have failed on routeVariable %s: got %v want %v", + tc.routeVariable, rr.Code, http.StatusOK) + } + } +} +``` + +## Full Example + +Here's a complete, runnable example of a small `mux` based server: + +```go +package main + +import ( + "net/http" + "log" + "github.com/gorilla/mux" +) + +func YourHandler(w http.ResponseWriter, r *http.Request) { + w.Write([]byte("Gorilla!\n")) +} + +func main() { + r := mux.NewRouter() + // Routes consist of a path and a handler function. + r.HandleFunc("/", YourHandler) + + // Bind to a port and pass our router in + log.Fatal(http.ListenAndServe(":8000", r)) +} +``` + +## License + +BSD licensed. See the LICENSE file for details. diff --git a/vendor/github.com/gorilla/mux/doc.go b/vendor/github.com/gorilla/mux/doc.go new file mode 100644 index 0000000..8060135 --- /dev/null +++ b/vendor/github.com/gorilla/mux/doc.go @@ -0,0 +1,305 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package mux implements a request router and dispatcher. + +The name mux stands for "HTTP request multiplexer". Like the standard +http.ServeMux, mux.Router matches incoming requests against a list of +registered routes and calls a handler for the route that matches the URL +or other conditions. The main features are: + + - Requests can be matched based on URL host, path, path prefix, schemes, + header and query values, HTTP methods or using custom matchers. + - URL hosts, paths and query values can have variables with an optional + regular expression. + - Registered URLs can be built, or "reversed", which helps maintaining + references to resources. + - Routes can be used as subrouters: nested routes are only tested if the + parent route matches. This is useful to define groups of routes that + share common conditions like a host, a path prefix or other repeated + attributes. As a bonus, this optimizes request matching. + - It implements the http.Handler interface so it is compatible with the + standard http.ServeMux. + +Let's start registering a couple of URL paths and handlers: + + func main() { + r := mux.NewRouter() + r.HandleFunc("/", HomeHandler) + r.HandleFunc("/products", ProductsHandler) + r.HandleFunc("/articles", ArticlesHandler) + http.Handle("/", r) + } + +Here we register three routes mapping URL paths to handlers. This is +equivalent to how http.HandleFunc() works: if an incoming request URL matches +one of the paths, the corresponding handler is called passing +(http.ResponseWriter, *http.Request) as parameters. + +Paths can have variables. They are defined using the format {name} or +{name:pattern}. If a regular expression pattern is not defined, the matched +variable will be anything until the next slash. For example: + + r := mux.NewRouter() + r.HandleFunc("/products/{key}", ProductHandler) + r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler) + r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler) + +Groups can be used inside patterns, as long as they are non-capturing (?:re). 
For example:
+
+ r.HandleFunc("/articles/{category}/{sort:(?:asc|desc|new)}", ArticlesCategoryHandler)
+
+The names are used to create a map of route variables which can be retrieved by
+calling mux.Vars():
+
+ vars := mux.Vars(request)
+ category := vars["category"]
+
+Note that if any capturing groups are present, mux will panic() during parsing. To prevent
+this, convert any capturing groups to non-capturing, e.g. change "/{sort:(asc|desc)}" to
+"/{sort:(?:asc|desc)}". This is a change from prior versions which behaved unpredictably
+when capturing groups were present.
+
+And this is all you need to know about the basic usage. More advanced options
+are explained below.
+
+Routes can also be restricted to a domain or subdomain. Just define a host
+pattern to be matched. They can also have variables:
+
+ r := mux.NewRouter()
+ // Only matches if domain is "www.example.com".
+ r.Host("www.example.com")
+ // Matches a dynamic subdomain.
+ r.Host("{subdomain:[a-z]+}.domain.com")
+
+There are several other matchers that can be added. To match path prefixes:
+
+ r.PathPrefix("/products/")
+
+...or HTTP methods:
+
+ r.Methods("GET", "POST")
+
+...or URL schemes:
+
+ r.Schemes("https")
+
+...or header values:
+
+ r.Headers("X-Requested-With", "XMLHttpRequest")
+
+...or query values:
+
+ r.Queries("key", "value")
+
+...or to use a custom matcher function:
+
+ r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool {
+ return r.ProtoMajor == 0
+ })
+
+...and finally, it is possible to combine several matchers in a single route:
+
+ r.HandleFunc("/products", ProductsHandler).
+ Host("www.example.com").
+ Methods("GET").
+ Schemes("http")
+
+Setting the same matching conditions again and again can be boring, so we have
+a way to group several routes that share the same requirements.
+We call it "subrouting".
+
+For example, let's say we have several URLs that should only match when the
+host is "www.example.com". Create a route for that host and get a "subrouter"
+from it:
+
+ r := mux.NewRouter()
+ s := r.Host("www.example.com").Subrouter()
+
+Then register routes in the subrouter:
+
+ s.HandleFunc("/products/", ProductsHandler)
+ s.HandleFunc("/products/{key}", ProductHandler)
+ s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
+
+The three URL paths we registered above will only be tested if the domain is
+"www.example.com", because the subrouter is tested first. This is not
+only convenient, but also optimizes request matching. You can create
+subrouters combining any attribute matchers accepted by a route.
+
+Subrouters can be used to create domain or path "namespaces": you define
+subrouters in a central place and then parts of the app can register their
+paths relative to a given subrouter.
+
+There's one more thing about subroutes. When a subrouter has a path prefix,
+the inner routes use it as base for their paths:
+
+ r := mux.NewRouter()
+ s := r.PathPrefix("/products").Subrouter()
+ // "/products/"
+ s.HandleFunc("/", ProductsHandler)
+ // "/products/{key}/"
+ s.HandleFunc("/{key}/", ProductHandler)
+ // "/products/{key}/details"
+ s.HandleFunc("/{key}/details", ProductDetailsHandler)
+
+Note that the path provided to PathPrefix() represents a "wildcard": calling
+PathPrefix("/static/").Handler(...) means that the handler will be passed any
+request that matches "/static/*". This makes it easy to serve static files with mux:
+
+ func main() {
+ var dir string
+
+ flag.StringVar(&dir, "dir", ".", "the directory to serve files from.
Defaults to the current dir") + flag.Parse() + r := mux.NewRouter() + + // This will serve files under http://localhost:8000/static/ + r.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(http.Dir(dir)))) + + srv := &http.Server{ + Handler: r, + Addr: "127.0.0.1:8000", + // Good practice: enforce timeouts for servers you create! + WriteTimeout: 15 * time.Second, + ReadTimeout: 15 * time.Second, + } + + log.Fatal(srv.ListenAndServe()) + } + +Now let's see how to build registered URLs. + +Routes can be named. All routes that define a name can have their URLs built, +or "reversed". We define a name calling Name() on a route. For example: + + r := mux.NewRouter() + r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). + Name("article") + +To build a URL, get the route and call the URL() method, passing a sequence of +key/value pairs for the route variables. For the previous route, we would do: + + url, err := r.Get("article").URL("category", "technology", "id", "42") + +...and the result will be a url.URL with the following path: + + "/articles/technology/42" + +This also works for host and query value variables: + + r := mux.NewRouter() + r.Host("{subdomain}.domain.com"). + Path("/articles/{category}/{id:[0-9]+}"). + Queries("filter", "{filter}"). + HandlerFunc(ArticleHandler). + Name("article") + + // url.String() will be "http://news.domain.com/articles/technology/42?filter=gorilla" + url, err := r.Get("article").URL("subdomain", "news", + "category", "technology", + "id", "42", + "filter", "gorilla") + +All variables defined in the route are required, and their values must +conform to the corresponding patterns. These requirements guarantee that a +generated URL will always match a registered route -- the only exception is +for explicitly defined "build-only" routes which never match. + +Regex support also exists for matching Headers within a route. For example, we could do: + + r.HeadersRegexp("Content-Type", "application/(text|json)") + +...and the route will match both requests with a Content-Type of `application/json` as well as +`application/text` + +There's also a way to build only the URL host or path for a route: +use the methods URLHost() or URLPath() instead. For the previous route, +we would do: + + // "http://news.domain.com/" + host, err := r.Get("article").URLHost("subdomain", "news") + + // "/articles/technology/42" + path, err := r.Get("article").URLPath("category", "technology", "id", "42") + +And if you use subrouters, host and path defined separately can be built +as well: + + r := mux.NewRouter() + s := r.Host("{subdomain}.domain.com").Subrouter() + s.Path("/articles/{category}/{id:[0-9]+}"). + HandlerFunc(ArticleHandler). + Name("article") + + // "http://news.domain.com/articles/technology/42" + url, err := r.Get("article").URL("subdomain", "news", + "category", "technology", + "id", "42") + +Mux supports the addition of middlewares to a Router, which are executed in the order they are added if a match is found, including its subrouters. Middlewares are (typically) small pieces of code which take one request, do something with it, and pass it down to another middleware or the final handler. Some common use cases for middleware are request logging, header manipulation, or ResponseWriter hijacking. 
+ + type MiddlewareFunc func(http.Handler) http.Handler + +Typically, the returned handler is a closure which does something with the http.ResponseWriter and http.Request passed to it, and then calls the handler passed as parameter to the MiddlewareFunc (closures can access variables from the context where they are created). + +A very basic middleware which logs the URI of the request being handled could be written as: + + func simpleMw(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Do stuff here + log.Println(r.RequestURI) + // Call the next handler, which can be another middleware in the chain, or the final handler. + next.ServeHTTP(w, r) + }) + } + +Middlewares can be added to a router using `Router.Use()`: + + r := mux.NewRouter() + r.HandleFunc("/", handler) + r.Use(simpleMw) + +A more complex authentication middleware, which maps session token to users, could be written as: + + // Define our struct + type authenticationMiddleware struct { + tokenUsers map[string]string + } + + // Initialize it somewhere + func (amw *authenticationMiddleware) Populate() { + amw.tokenUsers["00000000"] = "user0" + amw.tokenUsers["aaaaaaaa"] = "userA" + amw.tokenUsers["05f717e5"] = "randomUser" + amw.tokenUsers["deadbeef"] = "user0" + } + + // Middleware function, which will be called for each request + func (amw *authenticationMiddleware) Middleware(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + token := r.Header.Get("X-Session-Token") + + if user, found := amw.tokenUsers[token]; found { + // We found the token in our map + log.Printf("Authenticated user %s\n", user) + next.ServeHTTP(w, r) + } else { + http.Error(w, "Forbidden", http.StatusForbidden) + } + }) + } + + r := mux.NewRouter() + r.HandleFunc("/", handler) + + amw := authenticationMiddleware{tokenUsers: make(map[string]string)} + amw.Populate() + + r.Use(amw.Middleware) + +Note: The handler chain will be stopped if your middleware doesn't call `next.ServeHTTP()` with the corresponding parameters. This can be used to abort a request if the middleware writer wants to. +*/ +package mux diff --git a/vendor/github.com/gorilla/mux/middleware.go b/vendor/github.com/gorilla/mux/middleware.go new file mode 100644 index 0000000..cb51c56 --- /dev/null +++ b/vendor/github.com/gorilla/mux/middleware.go @@ -0,0 +1,74 @@ +package mux + +import ( + "net/http" + "strings" +) + +// MiddlewareFunc is a function which receives an http.Handler and returns another http.Handler. +// Typically, the returned handler is a closure which does something with the http.ResponseWriter and http.Request passed +// to it, and then calls the handler passed as parameter to the MiddlewareFunc. +type MiddlewareFunc func(http.Handler) http.Handler + +// middleware interface is anything which implements a MiddlewareFunc named Middleware. +type middleware interface { + Middleware(handler http.Handler) http.Handler +} + +// Middleware allows MiddlewareFunc to implement the middleware interface. +func (mw MiddlewareFunc) Middleware(handler http.Handler) http.Handler { + return mw(handler) +} + +// Use appends a MiddlewareFunc to the chain. Middleware can be used to intercept or otherwise modify requests and/or responses, and are executed in the order that they are applied to the Router. +func (r *Router) Use(mwf ...MiddlewareFunc) { + for _, fn := range mwf { + r.middlewares = append(r.middlewares, fn) + } +} + +// useInterface appends a middleware to the chain. 
Middleware can be used to intercept or otherwise modify requests and/or responses, and are executed in the order that they are applied to the Router. +func (r *Router) useInterface(mw middleware) { + r.middlewares = append(r.middlewares, mw) +} + +// CORSMethodMiddleware automatically sets the Access-Control-Allow-Methods response header +// on requests for routes that have an OPTIONS method matcher to all the method matchers on +// the route. Routes that do not explicitly handle OPTIONS requests will not be processed +// by the middleware. See examples for usage. +func CORSMethodMiddleware(r *Router) MiddlewareFunc { + return func(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + allMethods, err := getAllMethodsForRoute(r, req) + if err == nil { + for _, v := range allMethods { + if v == http.MethodOptions { + w.Header().Set("Access-Control-Allow-Methods", strings.Join(allMethods, ",")) + } + } + } + + next.ServeHTTP(w, req) + }) + } +} + +// getAllMethodsForRoute returns all the methods from method matchers matching a given +// request. +func getAllMethodsForRoute(r *Router, req *http.Request) ([]string, error) { + var allMethods []string + + for _, route := range r.routes { + var match RouteMatch + if route.Match(req, &match) || match.MatchErr == ErrMethodMismatch { + methods, err := route.GetMethods() + if err != nil { + return nil, err + } + + allMethods = append(allMethods, methods...) + } + } + + return allMethods, nil +} diff --git a/vendor/github.com/gorilla/mux/mux.go b/vendor/github.com/gorilla/mux/mux.go new file mode 100644 index 0000000..1e08990 --- /dev/null +++ b/vendor/github.com/gorilla/mux/mux.go @@ -0,0 +1,608 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mux + +import ( + "context" + "errors" + "fmt" + "net/http" + "path" + "regexp" +) + +var ( + // ErrMethodMismatch is returned when the method in the request does not match + // the method defined against the route. + ErrMethodMismatch = errors.New("method is not allowed") + // ErrNotFound is returned when no route match is found. + ErrNotFound = errors.New("no matching route was found") +) + +// NewRouter returns a new router instance. +func NewRouter() *Router { + return &Router{namedRoutes: make(map[string]*Route)} +} + +// Router registers routes to be matched and dispatches a handler. +// +// It implements the http.Handler interface, so it can be registered to serve +// requests: +// +// var router = mux.NewRouter() +// +// func main() { +// http.Handle("/", router) +// } +// +// Or, for Google App Engine, register it in a init() function: +// +// func init() { +// http.Handle("/", router) +// } +// +// This will send all incoming requests to the router. +type Router struct { + // Configurable Handler to be used when no route matches. + // This can be used to render your own 404 Not Found errors. + NotFoundHandler http.Handler + + // Configurable Handler to be used when the request method does not match the route. + // This can be used to render your own 405 Method Not Allowed errors. + MethodNotAllowedHandler http.Handler + + // Routes to be matched, in order. + routes []*Route + + // Routes by name for URL building. + namedRoutes map[string]*Route + + // If true, do not clear the request context after handling the request. + // + // Deprecated: No effect, since the context is stored on the request itself. 
+ KeepContext bool + + // Slice of middlewares to be called after a match is found + middlewares []middleware + + // configuration shared with `Route` + routeConf +} + +// common route configuration shared between `Router` and `Route` +type routeConf struct { + // If true, "/path/foo%2Fbar/to" will match the path "/path/{var}/to" + useEncodedPath bool + + // If true, when the path pattern is "/path/", accessing "/path" will + // redirect to the former and vice versa. + strictSlash bool + + // If true, when the path pattern is "/path//to", accessing "/path//to" + // will not redirect + skipClean bool + + // Manager for the variables from host and path. + regexp routeRegexpGroup + + // List of matchers. + matchers []matcher + + // The scheme used when building URLs. + buildScheme string + + buildVarsFunc BuildVarsFunc +} + +// returns an effective deep copy of `routeConf` +func copyRouteConf(r routeConf) routeConf { + c := r + + if r.regexp.path != nil { + c.regexp.path = copyRouteRegexp(r.regexp.path) + } + + if r.regexp.host != nil { + c.regexp.host = copyRouteRegexp(r.regexp.host) + } + + c.regexp.queries = make([]*routeRegexp, 0, len(r.regexp.queries)) + for _, q := range r.regexp.queries { + c.regexp.queries = append(c.regexp.queries, copyRouteRegexp(q)) + } + + c.matchers = make([]matcher, len(r.matchers)) + copy(c.matchers, r.matchers) + + return c +} + +func copyRouteRegexp(r *routeRegexp) *routeRegexp { + c := *r + return &c +} + +// Match attempts to match the given request against the router's registered routes. +// +// If the request matches a route of this router or one of its subrouters the Route, +// Handler, and Vars fields of the the match argument are filled and this function +// returns true. +// +// If the request does not match any of this router's or its subrouters' routes +// then this function returns false. If available, a reason for the match failure +// will be filled in the match argument's MatchErr field. If the match failure type +// (eg: not found) has a registered handler, the handler is assigned to the Handler +// field of the match argument. +func (r *Router) Match(req *http.Request, match *RouteMatch) bool { + for _, route := range r.routes { + if route.Match(req, match) { + // Build middleware chain if no error was found + if match.MatchErr == nil { + for i := len(r.middlewares) - 1; i >= 0; i-- { + match.Handler = r.middlewares[i].Middleware(match.Handler) + } + } + return true + } + } + + if match.MatchErr == ErrMethodMismatch { + if r.MethodNotAllowedHandler != nil { + match.Handler = r.MethodNotAllowedHandler + return true + } + + return false + } + + // Closest match for a router (includes sub-routers) + if r.NotFoundHandler != nil { + match.Handler = r.NotFoundHandler + match.MatchErr = ErrNotFound + return true + } + + match.MatchErr = ErrNotFound + return false +} + +// ServeHTTP dispatches the handler registered in the matched route. +// +// When there is a match, the route variables can be retrieved calling +// mux.Vars(request). +func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) { + if !r.skipClean { + path := req.URL.Path + if r.useEncodedPath { + path = req.URL.EscapedPath() + } + // Clean path to canonical form and redirect. + if p := cleanPath(path); p != path { + + // Added 3 lines (Philip Schlump) - It was dropping the query string and #whatever from query. + // This matches with fix in go 1.2 r.c. 4 for same problem. 
Go Issue: + // http://code.google.com/p/go/issues/detail?id=5252 + url := *req.URL + url.Path = p + p = url.String() + + w.Header().Set("Location", p) + w.WriteHeader(http.StatusMovedPermanently) + return + } + } + var match RouteMatch + var handler http.Handler + if r.Match(req, &match) { + handler = match.Handler + req = requestWithVars(req, match.Vars) + req = requestWithRoute(req, match.Route) + } + + if handler == nil && match.MatchErr == ErrMethodMismatch { + handler = methodNotAllowedHandler() + } + + if handler == nil { + handler = http.NotFoundHandler() + } + + handler.ServeHTTP(w, req) +} + +// Get returns a route registered with the given name. +func (r *Router) Get(name string) *Route { + return r.namedRoutes[name] +} + +// GetRoute returns a route registered with the given name. This method +// was renamed to Get() and remains here for backwards compatibility. +func (r *Router) GetRoute(name string) *Route { + return r.namedRoutes[name] +} + +// StrictSlash defines the trailing slash behavior for new routes. The initial +// value is false. +// +// When true, if the route path is "/path/", accessing "/path" will perform a redirect +// to the former and vice versa. In other words, your application will always +// see the path as specified in the route. +// +// When false, if the route path is "/path", accessing "/path/" will not match +// this route and vice versa. +// +// The re-direct is a HTTP 301 (Moved Permanently). Note that when this is set for +// routes with a non-idempotent method (e.g. POST, PUT), the subsequent re-directed +// request will be made as a GET by most clients. Use middleware or client settings +// to modify this behaviour as needed. +// +// Special case: when a route sets a path prefix using the PathPrefix() method, +// strict slash is ignored for that route because the redirect behavior can't +// be determined from a prefix alone. However, any subrouters created from that +// route inherit the original StrictSlash setting. +func (r *Router) StrictSlash(value bool) *Router { + r.strictSlash = value + return r +} + +// SkipClean defines the path cleaning behaviour for new routes. The initial +// value is false. Users should be careful about which routes are not cleaned +// +// When true, if the route path is "/path//to", it will remain with the double +// slash. This is helpful if you have a route like: /fetch/http://xkcd.com/534/ +// +// When false, the path will be cleaned, so /fetch/http://xkcd.com/534/ will +// become /fetch/http/xkcd.com/534 +func (r *Router) SkipClean(value bool) *Router { + r.skipClean = value + return r +} + +// UseEncodedPath tells the router to match the encoded original path +// to the routes. +// For eg. "/path/foo%2Fbar/to" will match the path "/path/{var}/to". +// +// If not called, the router will match the unencoded path to the routes. +// For eg. "/path/foo%2Fbar/to" will match the path "/path/foo/bar/to" +func (r *Router) UseEncodedPath() *Router { + r.useEncodedPath = true + return r +} + +// ---------------------------------------------------------------------------- +// Route factories +// ---------------------------------------------------------------------------- + +// NewRoute registers an empty route. +func (r *Router) NewRoute() *Route { + // initialize a route with a copy of the parent router's configuration + route := &Route{routeConf: copyRouteConf(r.routeConf), namedRoutes: r.namedRoutes} + r.routes = append(r.routes, route) + return route +} + +// Name registers a new route with a name. +// See Route.Name(). 
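+//
+// For example, a sketch of naming a route and reversing it later
+// (ArticleHandler stands in for any handler):
+//
+//    r := mux.NewRouter()
+//    r.Name("article").Path("/articles/{id:[0-9]+}").HandlerFunc(ArticleHandler)
+//    url, err := r.Get("article").URL("id", "42") // url.Path == "/articles/42"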
+func (r *Router) Name(name string) *Route { + return r.NewRoute().Name(name) +} + +// Handle registers a new route with a matcher for the URL path. +// See Route.Path() and Route.Handler(). +func (r *Router) Handle(path string, handler http.Handler) *Route { + return r.NewRoute().Path(path).Handler(handler) +} + +// HandleFunc registers a new route with a matcher for the URL path. +// See Route.Path() and Route.HandlerFunc(). +func (r *Router) HandleFunc(path string, f func(http.ResponseWriter, + *http.Request)) *Route { + return r.NewRoute().Path(path).HandlerFunc(f) +} + +// Headers registers a new route with a matcher for request header values. +// See Route.Headers(). +func (r *Router) Headers(pairs ...string) *Route { + return r.NewRoute().Headers(pairs...) +} + +// Host registers a new route with a matcher for the URL host. +// See Route.Host(). +func (r *Router) Host(tpl string) *Route { + return r.NewRoute().Host(tpl) +} + +// MatcherFunc registers a new route with a custom matcher function. +// See Route.MatcherFunc(). +func (r *Router) MatcherFunc(f MatcherFunc) *Route { + return r.NewRoute().MatcherFunc(f) +} + +// Methods registers a new route with a matcher for HTTP methods. +// See Route.Methods(). +func (r *Router) Methods(methods ...string) *Route { + return r.NewRoute().Methods(methods...) +} + +// Path registers a new route with a matcher for the URL path. +// See Route.Path(). +func (r *Router) Path(tpl string) *Route { + return r.NewRoute().Path(tpl) +} + +// PathPrefix registers a new route with a matcher for the URL path prefix. +// See Route.PathPrefix(). +func (r *Router) PathPrefix(tpl string) *Route { + return r.NewRoute().PathPrefix(tpl) +} + +// Queries registers a new route with a matcher for URL query values. +// See Route.Queries(). +func (r *Router) Queries(pairs ...string) *Route { + return r.NewRoute().Queries(pairs...) +} + +// Schemes registers a new route with a matcher for URL schemes. +// See Route.Schemes(). +func (r *Router) Schemes(schemes ...string) *Route { + return r.NewRoute().Schemes(schemes...) +} + +// BuildVarsFunc registers a new route with a custom function for modifying +// route variables before building a URL. +func (r *Router) BuildVarsFunc(f BuildVarsFunc) *Route { + return r.NewRoute().BuildVarsFunc(f) +} + +// Walk walks the router and all its sub-routers, calling walkFn for each route +// in the tree. The routes are walked in the order they were added. Sub-routers +// are explored depth-first. +func (r *Router) Walk(walkFn WalkFunc) error { + return r.walk(walkFn, []*Route{}) +} + +// SkipRouter is used as a return value from WalkFuncs to indicate that the +// router that walk is about to descend down to should be skipped. +var SkipRouter = errors.New("skip this router") + +// WalkFunc is the type of the function called for each route visited by Walk. +// At every invocation, it is given the current route, and the current router, +// and a list of ancestor routes that lead to the current route. 
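+//
+// For instance, a sketch of a WalkFunc that prints each route's path
+// template, skipping routes that do not define one:
+//
+//    func(route *Route, router *Router, ancestors []*Route) error {
+//        if t, err := route.GetPathTemplate(); err == nil {
+//            fmt.Println(t)
+//        }
+//        return nil
+//    }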
+type WalkFunc func(route *Route, router *Router, ancestors []*Route) error + +func (r *Router) walk(walkFn WalkFunc, ancestors []*Route) error { + for _, t := range r.routes { + err := walkFn(t, r, ancestors) + if err == SkipRouter { + continue + } + if err != nil { + return err + } + for _, sr := range t.matchers { + if h, ok := sr.(*Router); ok { + ancestors = append(ancestors, t) + err := h.walk(walkFn, ancestors) + if err != nil { + return err + } + ancestors = ancestors[:len(ancestors)-1] + } + } + if h, ok := t.handler.(*Router); ok { + ancestors = append(ancestors, t) + err := h.walk(walkFn, ancestors) + if err != nil { + return err + } + ancestors = ancestors[:len(ancestors)-1] + } + } + return nil +} + +// ---------------------------------------------------------------------------- +// Context +// ---------------------------------------------------------------------------- + +// RouteMatch stores information about a matched route. +type RouteMatch struct { + Route *Route + Handler http.Handler + Vars map[string]string + + // MatchErr is set to appropriate matching error + // It is set to ErrMethodMismatch if there is a mismatch in + // the request method and route method + MatchErr error +} + +type contextKey int + +const ( + varsKey contextKey = iota + routeKey +) + +// Vars returns the route variables for the current request, if any. +func Vars(r *http.Request) map[string]string { + if rv := r.Context().Value(varsKey); rv != nil { + return rv.(map[string]string) + } + return nil +} + +// CurrentRoute returns the matched route for the current request, if any. +// This only works when called inside the handler of the matched route +// because the matched route is stored in the request context which is cleared +// after the handler returns. +func CurrentRoute(r *http.Request) *Route { + if rv := r.Context().Value(routeKey); rv != nil { + return rv.(*Route) + } + return nil +} + +func requestWithVars(r *http.Request, vars map[string]string) *http.Request { + ctx := context.WithValue(r.Context(), varsKey, vars) + return r.WithContext(ctx) +} + +func requestWithRoute(r *http.Request, route *Route) *http.Request { + ctx := context.WithValue(r.Context(), routeKey, route) + return r.WithContext(ctx) +} + +// ---------------------------------------------------------------------------- +// Helpers +// ---------------------------------------------------------------------------- + +// cleanPath returns the canonical path for p, eliminating . and .. elements. +// Borrowed from the net/http package. +func cleanPath(p string) string { + if p == "" { + return "/" + } + if p[0] != '/' { + p = "/" + p + } + np := path.Clean(p) + // path.Clean removes trailing slash except for root; + // put the trailing slash back if necessary. + if p[len(p)-1] == '/' && np != "/" { + np += "/" + } + + return np +} + +// uniqueVars returns an error if two slices contain duplicated strings. +func uniqueVars(s1, s2 []string) error { + for _, v1 := range s1 { + for _, v2 := range s2 { + if v1 == v2 { + return fmt.Errorf("mux: duplicated route variable %q", v2) + } + } + } + return nil +} + +// checkPairs returns the count of strings passed in, and an error if +// the count is not an even number. +func checkPairs(pairs ...string) (int, error) { + length := len(pairs) + if length%2 != 0 { + return length, fmt.Errorf( + "mux: number of parameters must be multiple of 2, got %v", pairs) + } + return length, nil +} + +// mapFromPairsToString converts variadic string parameters to a +// string to string map. 
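+//
+// For example (illustrative): mapFromPairsToString("k1", "v1", "k2", "v2")
+// yields {"k1": "v1", "k2": "v2"}, while an odd number of parameters
+// produces an error.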
+func mapFromPairsToString(pairs ...string) (map[string]string, error) { + length, err := checkPairs(pairs...) + if err != nil { + return nil, err + } + m := make(map[string]string, length/2) + for i := 0; i < length; i += 2 { + m[pairs[i]] = pairs[i+1] + } + return m, nil +} + +// mapFromPairsToRegex converts variadic string parameters to a +// string to regex map. +func mapFromPairsToRegex(pairs ...string) (map[string]*regexp.Regexp, error) { + length, err := checkPairs(pairs...) + if err != nil { + return nil, err + } + m := make(map[string]*regexp.Regexp, length/2) + for i := 0; i < length; i += 2 { + regex, err := regexp.Compile(pairs[i+1]) + if err != nil { + return nil, err + } + m[pairs[i]] = regex + } + return m, nil +} + +// matchInArray returns true if the given string value is in the array. +func matchInArray(arr []string, value string) bool { + for _, v := range arr { + if v == value { + return true + } + } + return false +} + +// matchMapWithString returns true if the given key/value pairs exist in a given map. +func matchMapWithString(toCheck map[string]string, toMatch map[string][]string, canonicalKey bool) bool { + for k, v := range toCheck { + // Check if key exists. + if canonicalKey { + k = http.CanonicalHeaderKey(k) + } + if values := toMatch[k]; values == nil { + return false + } else if v != "" { + // If value was defined as an empty string we only check that the + // key exists. Otherwise we also check for equality. + valueExists := false + for _, value := range values { + if v == value { + valueExists = true + break + } + } + if !valueExists { + return false + } + } + } + return true +} + +// matchMapWithRegex returns true if the given key/value pairs exist in a given map compiled against +// the given regex +func matchMapWithRegex(toCheck map[string]*regexp.Regexp, toMatch map[string][]string, canonicalKey bool) bool { + for k, v := range toCheck { + // Check if key exists. + if canonicalKey { + k = http.CanonicalHeaderKey(k) + } + if values := toMatch[k]; values == nil { + return false + } else if v != nil { + // If value was defined as an empty string we only check that the + // key exists. Otherwise we also check for equality. + valueExists := false + for _, value := range values { + if v.MatchString(value) { + valueExists = true + break + } + } + if !valueExists { + return false + } + } + } + return true +} + +// methodNotAllowed replies to the request with an HTTP status code 405. +func methodNotAllowed(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusMethodNotAllowed) +} + +// methodNotAllowedHandler returns a simple request handler +// that replies to each request with a status code 405. +func methodNotAllowedHandler() http.Handler { return http.HandlerFunc(methodNotAllowed) } diff --git a/vendor/github.com/gorilla/mux/regexp.go b/vendor/github.com/gorilla/mux/regexp.go new file mode 100644 index 0000000..5d05cfa --- /dev/null +++ b/vendor/github.com/gorilla/mux/regexp.go @@ -0,0 +1,388 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package mux + +import ( + "bytes" + "fmt" + "net/http" + "net/url" + "regexp" + "strconv" + "strings" +) + +type routeRegexpOptions struct { + strictSlash bool + useEncodedPath bool +} + +type regexpType int + +const ( + regexpTypePath regexpType = iota + regexpTypeHost + regexpTypePrefix + regexpTypeQuery +) + +// newRouteRegexp parses a route template and returns a routeRegexp, +// used to match a host, a path or a query string. +// +// It will extract named variables, assemble a regexp to be matched, create +// a "reverse" template to build URLs and compile regexps to validate variable +// values used in URL building. +// +// Previously we accepted only Python-like identifiers for variable +// names ([a-zA-Z_][a-zA-Z0-9_]*), but currently the only restriction is that +// name and pattern can't be empty, and names can't contain a colon. +func newRouteRegexp(tpl string, typ regexpType, options routeRegexpOptions) (*routeRegexp, error) { + // Check if it is well-formed. + idxs, errBraces := braceIndices(tpl) + if errBraces != nil { + return nil, errBraces + } + // Backup the original. + template := tpl + // Now let's parse it. + defaultPattern := "[^/]+" + if typ == regexpTypeQuery { + defaultPattern = ".*" + } else if typ == regexpTypeHost { + defaultPattern = "[^.]+" + } + // Only match strict slash if not matching + if typ != regexpTypePath { + options.strictSlash = false + } + // Set a flag for strictSlash. + endSlash := false + if options.strictSlash && strings.HasSuffix(tpl, "/") { + tpl = tpl[:len(tpl)-1] + endSlash = true + } + varsN := make([]string, len(idxs)/2) + varsR := make([]*regexp.Regexp, len(idxs)/2) + pattern := bytes.NewBufferString("") + pattern.WriteByte('^') + reverse := bytes.NewBufferString("") + var end int + var err error + for i := 0; i < len(idxs); i += 2 { + // Set all values we are interested in. + raw := tpl[end:idxs[i]] + end = idxs[i+1] + parts := strings.SplitN(tpl[idxs[i]+1:end-1], ":", 2) + name := parts[0] + patt := defaultPattern + if len(parts) == 2 { + patt = parts[1] + } + // Name or pattern can't be empty. + if name == "" || patt == "" { + return nil, fmt.Errorf("mux: missing name or pattern in %q", + tpl[idxs[i]:end]) + } + // Build the regexp pattern. + fmt.Fprintf(pattern, "%s(?P<%s>%s)", regexp.QuoteMeta(raw), varGroupName(i/2), patt) + + // Build the reverse template. + fmt.Fprintf(reverse, "%s%%s", raw) + + // Append variable name and compiled pattern. + varsN[i/2] = name + varsR[i/2], err = regexp.Compile(fmt.Sprintf("^%s$", patt)) + if err != nil { + return nil, err + } + } + // Add the remaining. + raw := tpl[end:] + pattern.WriteString(regexp.QuoteMeta(raw)) + if options.strictSlash { + pattern.WriteString("[/]?") + } + if typ == regexpTypeQuery { + // Add the default pattern if the query value is empty + if queryVal := strings.SplitN(template, "=", 2)[1]; queryVal == "" { + pattern.WriteString(defaultPattern) + } + } + if typ != regexpTypePrefix { + pattern.WriteByte('$') + } + + var wildcardHostPort bool + if typ == regexpTypeHost { + if !strings.Contains(pattern.String(), ":") { + wildcardHostPort = true + } + } + reverse.WriteString(raw) + if endSlash { + reverse.WriteByte('/') + } + // Compile full regexp. + reg, errCompile := regexp.Compile(pattern.String()) + if errCompile != nil { + return nil, errCompile + } + + // Check for capturing groups which used to work in older versions + if reg.NumSubexp() != len(idxs)/2 { + panic(fmt.Sprintf("route %s contains capture groups in its regexp. 
", template) + + "Only non-capturing groups are accepted: e.g. (?:pattern) instead of (pattern)") + } + + // Done! + return &routeRegexp{ + template: template, + regexpType: typ, + options: options, + regexp: reg, + reverse: reverse.String(), + varsN: varsN, + varsR: varsR, + wildcardHostPort: wildcardHostPort, + }, nil +} + +// routeRegexp stores a regexp to match a host or path and information to +// collect and validate route variables. +type routeRegexp struct { + // The unmodified template. + template string + // The type of match + regexpType regexpType + // Options for matching + options routeRegexpOptions + // Expanded regexp. + regexp *regexp.Regexp + // Reverse template. + reverse string + // Variable names. + varsN []string + // Variable regexps (validators). + varsR []*regexp.Regexp + // Wildcard host-port (no strict port match in hostname) + wildcardHostPort bool +} + +// Match matches the regexp against the URL host or path. +func (r *routeRegexp) Match(req *http.Request, match *RouteMatch) bool { + if r.regexpType == regexpTypeHost { + host := getHost(req) + if r.wildcardHostPort { + // Don't be strict on the port match + if i := strings.Index(host, ":"); i != -1 { + host = host[:i] + } + } + return r.regexp.MatchString(host) + } + + if r.regexpType == regexpTypeQuery { + return r.matchQueryString(req) + } + path := req.URL.Path + if r.options.useEncodedPath { + path = req.URL.EscapedPath() + } + return r.regexp.MatchString(path) +} + +// url builds a URL part using the given values. +func (r *routeRegexp) url(values map[string]string) (string, error) { + urlValues := make([]interface{}, len(r.varsN)) + for k, v := range r.varsN { + value, ok := values[v] + if !ok { + return "", fmt.Errorf("mux: missing route variable %q", v) + } + if r.regexpType == regexpTypeQuery { + value = url.QueryEscape(value) + } + urlValues[k] = value + } + rv := fmt.Sprintf(r.reverse, urlValues...) + if !r.regexp.MatchString(rv) { + // The URL is checked against the full regexp, instead of checking + // individual variables. This is faster but to provide a good error + // message, we check individual regexps if the URL doesn't match. + for k, v := range r.varsN { + if !r.varsR[k].MatchString(values[v]) { + return "", fmt.Errorf( + "mux: variable %q doesn't match, expected %q", values[v], + r.varsR[k].String()) + } + } + } + return rv, nil +} + +// getURLQuery returns a single query parameter from a request URL. +// For a URL with foo=bar&baz=ding, we return only the relevant key +// value pair for the routeRegexp. +func (r *routeRegexp) getURLQuery(req *http.Request) string { + if r.regexpType != regexpTypeQuery { + return "" + } + templateKey := strings.SplitN(r.template, "=", 2)[0] + val, ok := findFirstQueryKey(req.URL.RawQuery, templateKey) + if ok { + return templateKey + "=" + val + } + return "" +} + +// findFirstQueryKey returns the same result as (*url.URL).Query()[key][0]. +// If key was not found, empty string and false is returned. +func findFirstQueryKey(rawQuery, key string) (value string, ok bool) { + query := []byte(rawQuery) + for len(query) > 0 { + foundKey := query + if i := bytes.IndexAny(foundKey, "&;"); i >= 0 { + foundKey, query = foundKey[:i], foundKey[i+1:] + } else { + query = query[:0] + } + if len(foundKey) == 0 { + continue + } + var value []byte + if i := bytes.IndexByte(foundKey, '='); i >= 0 { + foundKey, value = foundKey[:i], foundKey[i+1:] + } + if len(foundKey) < len(key) { + // Cannot possibly be key. 
+ continue + } + keyString, err := url.QueryUnescape(string(foundKey)) + if err != nil { + continue + } + if keyString != key { + continue + } + valueString, err := url.QueryUnescape(string(value)) + if err != nil { + continue + } + return valueString, true + } + return "", false +} + +func (r *routeRegexp) matchQueryString(req *http.Request) bool { + return r.regexp.MatchString(r.getURLQuery(req)) +} + +// braceIndices returns the first level curly brace indices from a string. +// It returns an error in case of unbalanced braces. +func braceIndices(s string) ([]int, error) { + var level, idx int + var idxs []int + for i := 0; i < len(s); i++ { + switch s[i] { + case '{': + if level++; level == 1 { + idx = i + } + case '}': + if level--; level == 0 { + idxs = append(idxs, idx, i+1) + } else if level < 0 { + return nil, fmt.Errorf("mux: unbalanced braces in %q", s) + } + } + } + if level != 0 { + return nil, fmt.Errorf("mux: unbalanced braces in %q", s) + } + return idxs, nil +} + +// varGroupName builds a capturing group name for the indexed variable. +func varGroupName(idx int) string { + return "v" + strconv.Itoa(idx) +} + +// ---------------------------------------------------------------------------- +// routeRegexpGroup +// ---------------------------------------------------------------------------- + +// routeRegexpGroup groups the route matchers that carry variables. +type routeRegexpGroup struct { + host *routeRegexp + path *routeRegexp + queries []*routeRegexp +} + +// setMatch extracts the variables from the URL once a route matches. +func (v routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) { + // Store host variables. + if v.host != nil { + host := getHost(req) + if v.host.wildcardHostPort { + // Don't be strict on the port match + if i := strings.Index(host, ":"); i != -1 { + host = host[:i] + } + } + matches := v.host.regexp.FindStringSubmatchIndex(host) + if len(matches) > 0 { + extractVars(host, matches, v.host.varsN, m.Vars) + } + } + path := req.URL.Path + if r.useEncodedPath { + path = req.URL.EscapedPath() + } + // Store path variables. + if v.path != nil { + matches := v.path.regexp.FindStringSubmatchIndex(path) + if len(matches) > 0 { + extractVars(path, matches, v.path.varsN, m.Vars) + // Check if we should redirect. + if v.path.options.strictSlash { + p1 := strings.HasSuffix(path, "/") + p2 := strings.HasSuffix(v.path.template, "/") + if p1 != p2 { + u, _ := url.Parse(req.URL.String()) + if p1 { + u.Path = u.Path[:len(u.Path)-1] + } else { + u.Path += "/" + } + m.Handler = http.RedirectHandler(u.String(), http.StatusMovedPermanently) + } + } + } + } + // Store query string variables. + for _, q := range v.queries { + queryURL := q.getURLQuery(req) + matches := q.regexp.FindStringSubmatchIndex(queryURL) + if len(matches) > 0 { + extractVars(queryURL, matches, q.varsN, m.Vars) + } + } +} + +// getHost tries its best to return the request host. +// According to section 14.23 of RFC 2616 the Host header +// can include the port number if the default value of 80 is not used. 
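+// For requests carrying an absolute URL (e.g. received by a proxy) the URL
+// host is used; otherwise the Host header is returned, including any port.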
+func getHost(r *http.Request) string { + if r.URL.IsAbs() { + return r.URL.Host + } + return r.Host +} + +func extractVars(input string, matches []int, names []string, output map[string]string) { + for i, name := range names { + output[name] = input[matches[2*i+2]:matches[2*i+3]] + } +} diff --git a/vendor/github.com/gorilla/mux/route.go b/vendor/github.com/gorilla/mux/route.go new file mode 100644 index 0000000..e8f11df --- /dev/null +++ b/vendor/github.com/gorilla/mux/route.go @@ -0,0 +1,765 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mux + +import ( + "errors" + "fmt" + "net/http" + "net/url" + "regexp" + "strings" +) + +// Route stores information to match a request and build URLs. +type Route struct { + // Request handler for the route. + handler http.Handler + // If true, this route never matches: it is only used to build URLs. + buildOnly bool + // The name used to build URLs. + name string + // Error resulted from building a route. + err error + + // "global" reference to all named routes + namedRoutes map[string]*Route + + // config possibly passed in from `Router` + routeConf +} + +// SkipClean reports whether path cleaning is enabled for this route via +// Router.SkipClean. +func (r *Route) SkipClean() bool { + return r.skipClean +} + +// Match matches the route against the request. +func (r *Route) Match(req *http.Request, match *RouteMatch) bool { + if r.buildOnly || r.err != nil { + return false + } + + var matchErr error + + // Match everything. + for _, m := range r.matchers { + if matched := m.Match(req, match); !matched { + if _, ok := m.(methodMatcher); ok { + matchErr = ErrMethodMismatch + continue + } + + // Ignore ErrNotFound errors. These errors arise from match call + // to Subrouters. + // + // This prevents subsequent matching subrouters from failing to + // run middleware. If not ignored, the middleware would see a + // non-nil MatchErr and be skipped, even when there was a + // matching route. + if match.MatchErr == ErrNotFound { + match.MatchErr = nil + } + + matchErr = nil // nolint:ineffassign + return false + } else { + // Multiple routes may share the same path but use different HTTP methods. For instance: + // Route 1: POST "/users/{id}". + // Route 2: GET "/users/{id}", parameters: "id": "[0-9]+". + // + // The router must handle these cases correctly. For a GET request to "/users/abc" with "id" as "-2", + // The router should return a "Not Found" error as no route fully matches this request. + if match.MatchErr == ErrMethodMismatch { + match.MatchErr = nil + } + } + } + + if matchErr != nil { + match.MatchErr = matchErr + return false + } + + if match.MatchErr == ErrMethodMismatch && r.handler != nil { + // We found a route which matches request method, clear MatchErr + match.MatchErr = nil + // Then override the mis-matched handler + match.Handler = r.handler + } + + // Yay, we have a match. Let's collect some info about it. + if match.Route == nil { + match.Route = r + } + if match.Handler == nil { + match.Handler = r.handler + } + if match.Vars == nil { + match.Vars = make(map[string]string) + } + + // Set variables. 
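+	// Extract host, path and query variables into match.Vars. With
+	// StrictSlash enabled this may also install a redirect handler when the
+	// request's trailing slash differs from the route template.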
+ r.regexp.setMatch(req, match, r) + return true +} + +// ---------------------------------------------------------------------------- +// Route attributes +// ---------------------------------------------------------------------------- + +// GetError returns an error resulted from building the route, if any. +func (r *Route) GetError() error { + return r.err +} + +// BuildOnly sets the route to never match: it is only used to build URLs. +func (r *Route) BuildOnly() *Route { + r.buildOnly = true + return r +} + +// Handler -------------------------------------------------------------------- + +// Handler sets a handler for the route. +func (r *Route) Handler(handler http.Handler) *Route { + if r.err == nil { + r.handler = handler + } + return r +} + +// HandlerFunc sets a handler function for the route. +func (r *Route) HandlerFunc(f func(http.ResponseWriter, *http.Request)) *Route { + return r.Handler(http.HandlerFunc(f)) +} + +// GetHandler returns the handler for the route, if any. +func (r *Route) GetHandler() http.Handler { + return r.handler +} + +// Name ----------------------------------------------------------------------- + +// Name sets the name for the route, used to build URLs. +// It is an error to call Name more than once on a route. +func (r *Route) Name(name string) *Route { + if r.name != "" { + r.err = fmt.Errorf("mux: route already has name %q, can't set %q", + r.name, name) + } + if r.err == nil { + r.name = name + r.namedRoutes[name] = r + } + return r +} + +// GetName returns the name for the route, if any. +func (r *Route) GetName() string { + return r.name +} + +// ---------------------------------------------------------------------------- +// Matchers +// ---------------------------------------------------------------------------- + +// matcher types try to match a request. +type matcher interface { + Match(*http.Request, *RouteMatch) bool +} + +// addMatcher adds a matcher to the route. +func (r *Route) addMatcher(m matcher) *Route { + if r.err == nil { + r.matchers = append(r.matchers, m) + } + return r +} + +// addRegexpMatcher adds a host or path matcher and builder to a route. +func (r *Route) addRegexpMatcher(tpl string, typ regexpType) error { + if r.err != nil { + return r.err + } + if typ == regexpTypePath || typ == regexpTypePrefix { + if len(tpl) > 0 && tpl[0] != '/' { + return fmt.Errorf("mux: path must start with a slash, got %q", tpl) + } + if r.regexp.path != nil { + tpl = strings.TrimRight(r.regexp.path.template, "/") + tpl + } + } + rr, err := newRouteRegexp(tpl, typ, routeRegexpOptions{ + strictSlash: r.strictSlash, + useEncodedPath: r.useEncodedPath, + }) + if err != nil { + return err + } + for _, q := range r.regexp.queries { + if err = uniqueVars(rr.varsN, q.varsN); err != nil { + return err + } + } + if typ == regexpTypeHost { + if r.regexp.path != nil { + if err = uniqueVars(rr.varsN, r.regexp.path.varsN); err != nil { + return err + } + } + r.regexp.host = rr + } else { + if r.regexp.host != nil { + if err = uniqueVars(rr.varsN, r.regexp.host.varsN); err != nil { + return err + } + } + if typ == regexpTypeQuery { + r.regexp.queries = append(r.regexp.queries, rr) + } else { + r.regexp.path = rr + } + } + r.addMatcher(rr) + return nil +} + +// Headers -------------------------------------------------------------------- + +// headerMatcher matches the request against header values. 
+type headerMatcher map[string]string + +func (m headerMatcher) Match(r *http.Request, match *RouteMatch) bool { + return matchMapWithString(m, r.Header, true) +} + +// Headers adds a matcher for request header values. +// It accepts a sequence of key/value pairs to be matched. For example: +// +// r := mux.NewRouter().NewRoute() +// r.Headers("Content-Type", "application/json", +// "X-Requested-With", "XMLHttpRequest") +// +// The above route will only match if both request header values match. +// If the value is an empty string, it will match any value if the key is set. +func (r *Route) Headers(pairs ...string) *Route { + if r.err == nil { + var headers map[string]string + headers, r.err = mapFromPairsToString(pairs...) + return r.addMatcher(headerMatcher(headers)) + } + return r +} + +// headerRegexMatcher matches the request against the route given a regex for the header +type headerRegexMatcher map[string]*regexp.Regexp + +func (m headerRegexMatcher) Match(r *http.Request, match *RouteMatch) bool { + return matchMapWithRegex(m, r.Header, true) +} + +// HeadersRegexp accepts a sequence of key/value pairs, where the value has regex +// support. For example: +// +// r := mux.NewRouter().NewRoute() +// r.HeadersRegexp("Content-Type", "application/(text|json)", +// "X-Requested-With", "XMLHttpRequest") +// +// The above route will only match if both the request header matches both regular expressions. +// If the value is an empty string, it will match any value if the key is set. +// Use the start and end of string anchors (^ and $) to match an exact value. +func (r *Route) HeadersRegexp(pairs ...string) *Route { + if r.err == nil { + var headers map[string]*regexp.Regexp + headers, r.err = mapFromPairsToRegex(pairs...) + return r.addMatcher(headerRegexMatcher(headers)) + } + return r +} + +// Host ----------------------------------------------------------------------- + +// Host adds a matcher for the URL host. +// It accepts a template with zero or more URL variables enclosed by {}. +// Variables can define an optional regexp pattern to be matched: +// +// - {name} matches anything until the next dot. +// +// - {name:pattern} matches the given regexp pattern. +// +// For example: +// +// r := mux.NewRouter().NewRoute() +// r.Host("www.example.com") +// r.Host("{subdomain}.domain.com") +// r.Host("{subdomain:[a-z]+}.domain.com") +// +// Variable names must be unique in a given route. They can be retrieved +// calling mux.Vars(request). +func (r *Route) Host(tpl string) *Route { + r.err = r.addRegexpMatcher(tpl, regexpTypeHost) + return r +} + +// MatcherFunc ---------------------------------------------------------------- + +// MatcherFunc is the function signature used by custom matchers. +type MatcherFunc func(*http.Request, *RouteMatch) bool + +// Match returns the match for a given request. +func (m MatcherFunc) Match(r *http.Request, match *RouteMatch) bool { + return m(r, match) +} + +// MatcherFunc adds a custom function to be used as request matcher. +func (r *Route) MatcherFunc(f MatcherFunc) *Route { + return r.addMatcher(f) +} + +// Methods -------------------------------------------------------------------- + +// methodMatcher matches the request against HTTP methods. +type methodMatcher []string + +func (m methodMatcher) Match(r *http.Request, match *RouteMatch) bool { + return matchInArray(m, r.Method) +} + +// Methods adds a matcher for HTTP methods. +// It accepts a sequence of one or more methods to be matched, e.g.: +// "GET", "POST", "PUT". 
+func (r *Route) Methods(methods ...string) *Route { + for k, v := range methods { + methods[k] = strings.ToUpper(v) + } + return r.addMatcher(methodMatcher(methods)) +} + +// Path ----------------------------------------------------------------------- + +// Path adds a matcher for the URL path. +// It accepts a template with zero or more URL variables enclosed by {}. The +// template must start with a "/". +// Variables can define an optional regexp pattern to be matched: +// +// - {name} matches anything until the next slash. +// +// - {name:pattern} matches the given regexp pattern. +// +// For example: +// +// r := mux.NewRouter().NewRoute() +// r.Path("/products/").Handler(ProductsHandler) +// r.Path("/products/{key}").Handler(ProductsHandler) +// r.Path("/articles/{category}/{id:[0-9]+}"). +// Handler(ArticleHandler) +// +// Variable names must be unique in a given route. They can be retrieved +// calling mux.Vars(request). +func (r *Route) Path(tpl string) *Route { + r.err = r.addRegexpMatcher(tpl, regexpTypePath) + return r +} + +// PathPrefix ----------------------------------------------------------------- + +// PathPrefix adds a matcher for the URL path prefix. This matches if the given +// template is a prefix of the full URL path. See Route.Path() for details on +// the tpl argument. +// +// Note that it does not treat slashes specially ("/foobar/" will be matched by +// the prefix "/foo") so you may want to use a trailing slash here. +// +// Also note that the setting of Router.StrictSlash() has no effect on routes +// with a PathPrefix matcher. +func (r *Route) PathPrefix(tpl string) *Route { + r.err = r.addRegexpMatcher(tpl, regexpTypePrefix) + return r +} + +// Query ---------------------------------------------------------------------- + +// Queries adds a matcher for URL query values. +// It accepts a sequence of key/value pairs. Values may define variables. +// For example: +// +// r := mux.NewRouter().NewRoute() +// r.Queries("foo", "bar", "id", "{id:[0-9]+}") +// +// The above route will only match if the URL contains the defined queries +// values, e.g.: ?foo=bar&id=42. +// +// If the value is an empty string, it will match any value if the key is set. +// +// Variables can define an optional regexp pattern to be matched: +// +// - {name} matches anything until the next slash. +// +// - {name:pattern} matches the given regexp pattern. +func (r *Route) Queries(pairs ...string) *Route { + length := len(pairs) + if length%2 != 0 { + r.err = fmt.Errorf( + "mux: number of parameters must be multiple of 2, got %v", pairs) + return nil + } + for i := 0; i < length; i += 2 { + if r.err = r.addRegexpMatcher(pairs[i]+"="+pairs[i+1], regexpTypeQuery); r.err != nil { + return r + } + } + + return r +} + +// Schemes -------------------------------------------------------------------- + +// schemeMatcher matches the request against URL schemes. +type schemeMatcher []string + +func (m schemeMatcher) Match(r *http.Request, match *RouteMatch) bool { + scheme := r.URL.Scheme + // https://golang.org/pkg/net/http/#Request + // "For [most] server requests, fields other than Path and RawQuery will be + // empty." + // Since we're an http muxer, the scheme is either going to be http or https + // though, so we can just set it based on the tls termination state. + if scheme == "" { + if r.TLS == nil { + scheme = "http" + } else { + scheme = "https" + } + } + return matchInArray(m, scheme) +} + +// Schemes adds a matcher for URL schemes. 
+// It accepts a sequence of schemes to be matched, e.g.: "http", "https". +// If the request's URL has a scheme set, it will be matched against. +// Generally, the URL scheme will only be set if a previous handler set it, +// such as the ProxyHeaders handler from gorilla/handlers. +// If unset, the scheme will be determined based on the request's TLS +// termination state. +// The first argument to Schemes will be used when constructing a route URL. +func (r *Route) Schemes(schemes ...string) *Route { + for k, v := range schemes { + schemes[k] = strings.ToLower(v) + } + if len(schemes) > 0 { + r.buildScheme = schemes[0] + } + return r.addMatcher(schemeMatcher(schemes)) +} + +// BuildVarsFunc -------------------------------------------------------------- + +// BuildVarsFunc is the function signature used by custom build variable +// functions (which can modify route variables before a route's URL is built). +type BuildVarsFunc func(map[string]string) map[string]string + +// BuildVarsFunc adds a custom function to be used to modify build variables +// before a route's URL is built. +func (r *Route) BuildVarsFunc(f BuildVarsFunc) *Route { + if r.buildVarsFunc != nil { + // compose the old and new functions + old := r.buildVarsFunc + r.buildVarsFunc = func(m map[string]string) map[string]string { + return f(old(m)) + } + } else { + r.buildVarsFunc = f + } + return r +} + +// Subrouter ------------------------------------------------------------------ + +// Subrouter creates a subrouter for the route. +// +// It will test the inner routes only if the parent route matched. For example: +// +// r := mux.NewRouter().NewRoute() +// s := r.Host("www.example.com").Subrouter() +// s.HandleFunc("/products/", ProductsHandler) +// s.HandleFunc("/products/{key}", ProductHandler) +// s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler) +// +// Here, the routes registered in the subrouter won't be tested if the host +// doesn't match. +func (r *Route) Subrouter() *Router { + // initialize a subrouter with a copy of the parent route's configuration + router := &Router{routeConf: copyRouteConf(r.routeConf), namedRoutes: r.namedRoutes} + r.addMatcher(router) + return router +} + +// ---------------------------------------------------------------------------- +// URL building +// ---------------------------------------------------------------------------- + +// URL builds a URL for the route. +// +// It accepts a sequence of key/value pairs for the route variables. For +// example, given this route: +// +// r := mux.NewRouter() +// r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). +// Name("article") +// +// ...a URL for it can be built using: +// +// url, err := r.Get("article").URL("category", "technology", "id", "42") +// +// ...which will return an url.URL with the following path: +// +// "/articles/technology/42" +// +// This also works for host variables: +// +// r := mux.NewRouter() +// r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). +// Host("{subdomain}.domain.com"). 
+// Name("article") +// +// // url.String() will be "http://news.domain.com/articles/technology/42" +// url, err := r.Get("article").URL("subdomain", "news", +// "category", "technology", +// "id", "42") +// +// The scheme of the resulting url will be the first argument that was passed to Schemes: +// +// // url.String() will be "https://example.com" +// r := mux.NewRouter().NewRoute() +// url, err := r.Host("example.com") +// .Schemes("https", "http").URL() +// +// All variables defined in the route are required, and their values must +// conform to the corresponding patterns. +func (r *Route) URL(pairs ...string) (*url.URL, error) { + if r.err != nil { + return nil, r.err + } + values, err := r.prepareVars(pairs...) + if err != nil { + return nil, err + } + var scheme, host, path string + queries := make([]string, 0, len(r.regexp.queries)) + if r.regexp.host != nil { + if host, err = r.regexp.host.url(values); err != nil { + return nil, err + } + scheme = "http" + if r.buildScheme != "" { + scheme = r.buildScheme + } + } + if r.regexp.path != nil { + if path, err = r.regexp.path.url(values); err != nil { + return nil, err + } + } + for _, q := range r.regexp.queries { + var query string + if query, err = q.url(values); err != nil { + return nil, err + } + queries = append(queries, query) + } + return &url.URL{ + Scheme: scheme, + Host: host, + Path: path, + RawQuery: strings.Join(queries, "&"), + }, nil +} + +// URLHost builds the host part of the URL for a route. See Route.URL(). +// +// The route must have a host defined. +func (r *Route) URLHost(pairs ...string) (*url.URL, error) { + if r.err != nil { + return nil, r.err + } + if r.regexp.host == nil { + return nil, errors.New("mux: route doesn't have a host") + } + values, err := r.prepareVars(pairs...) + if err != nil { + return nil, err + } + host, err := r.regexp.host.url(values) + if err != nil { + return nil, err + } + u := &url.URL{ + Scheme: "http", + Host: host, + } + if r.buildScheme != "" { + u.Scheme = r.buildScheme + } + return u, nil +} + +// URLPath builds the path part of the URL for a route. See Route.URL(). +// +// The route must have a path defined. +func (r *Route) URLPath(pairs ...string) (*url.URL, error) { + if r.err != nil { + return nil, r.err + } + if r.regexp.path == nil { + return nil, errors.New("mux: route doesn't have a path") + } + values, err := r.prepareVars(pairs...) + if err != nil { + return nil, err + } + path, err := r.regexp.path.url(values) + if err != nil { + return nil, err + } + return &url.URL{ + Path: path, + }, nil +} + +// GetPathTemplate returns the template used to build the +// route match. +// This is useful for building simple REST API documentation and for instrumentation +// against third-party services. +// An error will be returned if the route does not define a path. +func (r *Route) GetPathTemplate() (string, error) { + if r.err != nil { + return "", r.err + } + if r.regexp.path == nil { + return "", errors.New("mux: route doesn't have a path") + } + return r.regexp.path.template, nil +} + +// GetPathRegexp returns the expanded regular expression used to match route path. +// This is useful for building simple REST API documentation and for instrumentation +// against third-party services. +// An error will be returned if the route does not define a path. 
+func (r *Route) GetPathRegexp() (string, error) { + if r.err != nil { + return "", r.err + } + if r.regexp.path == nil { + return "", errors.New("mux: route does not have a path") + } + return r.regexp.path.regexp.String(), nil +} + +// GetQueriesRegexp returns the expanded regular expressions used to match the +// route queries. +// This is useful for building simple REST API documentation and for instrumentation +// against third-party services. +// An error will be returned if the route does not have queries. +func (r *Route) GetQueriesRegexp() ([]string, error) { + if r.err != nil { + return nil, r.err + } + if r.regexp.queries == nil { + return nil, errors.New("mux: route doesn't have queries") + } + queries := make([]string, 0, len(r.regexp.queries)) + for _, query := range r.regexp.queries { + queries = append(queries, query.regexp.String()) + } + return queries, nil +} + +// GetQueriesTemplates returns the templates used to build the +// query matching. +// This is useful for building simple REST API documentation and for instrumentation +// against third-party services. +// An error will be returned if the route does not define queries. +func (r *Route) GetQueriesTemplates() ([]string, error) { + if r.err != nil { + return nil, r.err + } + if r.regexp.queries == nil { + return nil, errors.New("mux: route doesn't have queries") + } + queries := make([]string, 0, len(r.regexp.queries)) + for _, query := range r.regexp.queries { + queries = append(queries, query.template) + } + return queries, nil +} + +// GetMethods returns the methods the route matches against +// This is useful for building simple REST API documentation and for instrumentation +// against third-party services. +// An error will be returned if route does not have methods. +func (r *Route) GetMethods() ([]string, error) { + if r.err != nil { + return nil, r.err + } + for _, m := range r.matchers { + if methods, ok := m.(methodMatcher); ok { + return []string(methods), nil + } + } + return nil, errors.New("mux: route doesn't have methods") +} + +// GetHostTemplate returns the template used to build the +// route match. +// This is useful for building simple REST API documentation and for instrumentation +// against third-party services. +// An error will be returned if the route does not define a host. +func (r *Route) GetHostTemplate() (string, error) { + if r.err != nil { + return "", r.err + } + if r.regexp.host == nil { + return "", errors.New("mux: route doesn't have a host") + } + return r.regexp.host.template, nil +} + +// GetVarNames returns the names of all variables added by regexp matchers +// These can be used to know which route variables should be passed into r.URL() +func (r *Route) GetVarNames() ([]string, error) { + if r.err != nil { + return nil, r.err + } + var varNames []string + if r.regexp.host != nil { + varNames = append(varNames, r.regexp.host.varsN...) + } + if r.regexp.path != nil { + varNames = append(varNames, r.regexp.path.varsN...) + } + for _, regx := range r.regexp.queries { + varNames = append(varNames, regx.varsN...) + } + return varNames, nil +} + +// prepareVars converts the route variable pairs into a map. If the route has a +// BuildVarsFunc, it is invoked. +func (r *Route) prepareVars(pairs ...string) (map[string]string, error) { + m, err := mapFromPairsToString(pairs...) 
+ if err != nil { + return nil, err + } + return r.buildVars(m), nil +} + +func (r *Route) buildVars(m map[string]string) map[string]string { + if r.buildVarsFunc != nil { + m = r.buildVarsFunc(m) + } + return m +} diff --git a/vendor/github.com/gorilla/mux/test_helpers.go b/vendor/github.com/gorilla/mux/test_helpers.go new file mode 100644 index 0000000..5f5c496 --- /dev/null +++ b/vendor/github.com/gorilla/mux/test_helpers.go @@ -0,0 +1,19 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mux + +import "net/http" + +// SetURLVars sets the URL variables for the given request, to be accessed via +// mux.Vars for testing route behaviour. Arguments are not modified, a shallow +// copy is returned. +// +// This API should only be used for testing purposes; it provides a way to +// inject variables into the request context. Alternatively, URL variables +// can be set by making a route that captures the required variables, +// starting a server and sending the request to that server. +func SetURLVars(r *http.Request, val map[string]string) *http.Request { + return requestWithVars(r, val) +} diff --git a/vendor/github.com/gorilla/securecookie/.editorconfig b/vendor/github.com/gorilla/securecookie/.editorconfig new file mode 100644 index 0000000..2940ec9 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/.editorconfig @@ -0,0 +1,20 @@ +; https://editorconfig.org/ + +root = true + +[*] +insert_final_newline = true +charset = utf-8 +trim_trailing_whitespace = true +indent_style = space +indent_size = 2 + +[{Makefile,go.mod,go.sum,*.go,.gitmodules}] +indent_style = tab +indent_size = 4 + +[*.md] +indent_size = 4 +trim_trailing_whitespace = false + +eclint_indent_style = unset diff --git a/vendor/github.com/gorilla/securecookie/.gitignore b/vendor/github.com/gorilla/securecookie/.gitignore new file mode 100644 index 0000000..84039fe --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/.gitignore @@ -0,0 +1 @@ +coverage.coverprofile diff --git a/vendor/github.com/gorilla/securecookie/LICENSE b/vendor/github.com/gorilla/securecookie/LICENSE new file mode 100644 index 0000000..bb9d80b --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2023 The Gorilla Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/gorilla/securecookie/Makefile b/vendor/github.com/gorilla/securecookie/Makefile new file mode 100644 index 0000000..2b9008a --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/Makefile @@ -0,0 +1,39 @@ +GO_LINT=$(shell which golangci-lint 2> /dev/null || echo '') +GO_LINT_URI=github.com/golangci/golangci-lint/cmd/golangci-lint@latest + +GO_SEC=$(shell which gosec 2> /dev/null || echo '') +GO_SEC_URI=github.com/securego/gosec/v2/cmd/gosec@latest + +GO_VULNCHECK=$(shell which govulncheck 2> /dev/null || echo '') +GO_VULNCHECK_URI=golang.org/x/vuln/cmd/govulncheck@latest + +.PHONY: golangci-lint +golangci-lint: + $(if $(GO_LINT), ,go install $(GO_LINT_URI)) + @echo "##### Running golangci-lint" + golangci-lint run -v + +.PHONY: gosec +gosec: + $(if $(GO_SEC), ,go install $(GO_SEC_URI)) + @echo "##### Running gosec" + gosec ./... + +.PHONY: govulncheck +govulncheck: + $(if $(GO_VULNCHECK), ,go install $(GO_VULNCHECK_URI)) + @echo "##### Running govulncheck" + govulncheck ./... + +.PHONY: verify +verify: golangci-lint gosec govulncheck + +.PHONY: test +test: + @echo "##### Running tests" + go test -race -cover -coverprofile=coverage.coverprofile -covermode=atomic -v ./... + +.PHONY: fuzz +fuzz: + @echo "##### Running fuzz tests" + go test -v -fuzz FuzzEncodeDecode -fuzztime 60s diff --git a/vendor/github.com/gorilla/securecookie/README.md b/vendor/github.com/gorilla/securecookie/README.md new file mode 100644 index 0000000..c3b9815 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/README.md @@ -0,0 +1,144 @@ +# gorilla/securecookie + +![testing](https://github.com/gorilla/securecookie/actions/workflows/test.yml/badge.svg) +[![codecov](https://codecov.io/github/gorilla/securecookie/branch/main/graph/badge.svg)](https://codecov.io/github/gorilla/securecookie) +[![godoc](https://godoc.org/github.com/gorilla/securecookie?status.svg)](https://godoc.org/github.com/gorilla/securecookie) +[![sourcegraph](https://sourcegraph.com/github.com/gorilla/securecookie/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/securecookie?badge) + +![Gorilla Logo](https://github.com/gorilla/.github/assets/53367916/d92caabf-98e0-473e-bfbf-ab554ba435e5) + +securecookie encodes and decodes authenticated and optionally encrypted +cookie values. + +Secure cookies can't be forged, because their values are validated using HMAC. +When encrypted, the content is also inaccessible to malicious eyes. It is still +recommended that sensitive data not be stored in cookies, and that HTTPS be used +to prevent cookie [replay attacks](https://en.wikipedia.org/wiki/Replay_attack). + +## Examples + +To use it, first create a new SecureCookie instance: + +```go +// Hash keys should be at least 32 bytes long +var hashKey = []byte("very-secret") +// Block keys should be 16 bytes (AES-128) or 32 bytes (AES-256) long. +// Shorter keys may weaken the encryption used. 
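+// In production, prefer strong, persisted keys, e.g. from
+// securecookie.GenerateRandomKey(32).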
+var blockKey = []byte("a-lot-secret") +var s = securecookie.New(hashKey, blockKey) +``` + +The hashKey is required, used to authenticate the cookie value using HMAC. +It is recommended to use a key with 32 or 64 bytes. + +The blockKey is optional, used to encrypt the cookie value -- set it to nil +to not use encryption. If set, the length must correspond to the block size +of the encryption algorithm. For AES, used by default, valid lengths are +16, 24, or 32 bytes to select AES-128, AES-192, or AES-256. + +Strong keys can be created using the convenience function +`GenerateRandomKey()`. Note that keys created using `GenerateRandomKey()` are not +automatically persisted. New keys will be created when the application is +restarted, and previously issued cookies will not be able to be decoded. + +Once a SecureCookie instance is set, use it to encode a cookie value: + +```go +func SetCookieHandler(w http.ResponseWriter, r *http.Request) { + value := map[string]string{ + "foo": "bar", + } + if encoded, err := s.Encode("cookie-name", value); err == nil { + cookie := &http.Cookie{ + Name: "cookie-name", + Value: encoded, + Path: "/", + Secure: true, + HttpOnly: true, + } + http.SetCookie(w, cookie) + } +} +``` + +Later, use the same SecureCookie instance to decode and validate a cookie +value: + +```go +func ReadCookieHandler(w http.ResponseWriter, r *http.Request) { + if cookie, err := r.Cookie("cookie-name"); err == nil { + value := make(map[string]string) + if err = s2.Decode("cookie-name", cookie.Value, &value); err == nil { + fmt.Fprintf(w, "The value of foo is %q", value["foo"]) + } + } +} +``` + +We stored a map[string]string, but secure cookies can hold any value that +can be encoded using `encoding/gob`. To store custom types, they must be +registered first using gob.Register(). For basic types this is not needed; +it works out of the box. An optional JSON encoder that uses `encoding/json` is +available for types compatible with JSON. + +### Key Rotation +Rotating keys is an important part of any security strategy. The `EncodeMulti` and +`DecodeMulti` functions allow for multiple keys to be rotated in and out. +For example, let's take a system that stores keys in a map: + +```go +// keys stored in a map will not be persisted between restarts +// a more persistent storage should be considered for production applications. +var cookies = map[string]*securecookie.SecureCookie{ + "previous": securecookie.New( + securecookie.GenerateRandomKey(64), + securecookie.GenerateRandomKey(32), + ), + "current": securecookie.New( + securecookie.GenerateRandomKey(64), + securecookie.GenerateRandomKey(32), + ), +} +``` + +Using the current key to encode new cookies: +```go +func SetCookieHandler(w http.ResponseWriter, r *http.Request) { + value := map[string]string{ + "foo": "bar", + } + if encoded, err := securecookie.EncodeMulti("cookie-name", value, cookies["current"]); err == nil { + cookie := &http.Cookie{ + Name: "cookie-name", + Value: encoded, + Path: "/", + } + http.SetCookie(w, cookie) + } +} +``` + +Later, decode cookies. Check against all valid keys: +```go +func ReadCookieHandler(w http.ResponseWriter, r *http.Request) { + if cookie, err := r.Cookie("cookie-name"); err == nil { + value := make(map[string]string) + err = securecookie.DecodeMulti("cookie-name", cookie.Value, &value, cookies["current"], cookies["previous"]) + if err == nil { + fmt.Fprintf(w, "The value of foo is %q", value["foo"]) + } + } +} +``` + +Rotate the keys. 
This strategy allows previously issued cookies to be valid until the next rotation: +```go +func Rotate(newCookie *securecookie.SecureCookie) { + cookies["previous"] = cookies["current"] + cookies["current"] = newCookie +} +``` + +## License + +BSD licensed. See the LICENSE file for details. diff --git a/vendor/github.com/gorilla/securecookie/doc.go b/vendor/github.com/gorilla/securecookie/doc.go new file mode 100644 index 0000000..ae89408 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/doc.go @@ -0,0 +1,61 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package securecookie encodes and decodes authenticated and optionally +encrypted cookie values. + +Secure cookies can't be forged, because their values are validated using HMAC. +When encrypted, the content is also inaccessible to malicious eyes. + +To use it, first create a new SecureCookie instance: + + var hashKey = []byte("very-secret") + var blockKey = []byte("a-lot-secret") + var s = securecookie.New(hashKey, blockKey) + +The hashKey is required, used to authenticate the cookie value using HMAC. +It is recommended to use a key with 32 or 64 bytes. + +The blockKey is optional, used to encrypt the cookie value -- set it to nil +to not use encryption. If set, the length must correspond to the block size +of the encryption algorithm. For AES, used by default, valid lengths are +16, 24, or 32 bytes to select AES-128, AES-192, or AES-256. + +Strong keys can be created using the convenience function GenerateRandomKey(). + +Once a SecureCookie instance is set, use it to encode a cookie value: + + func SetCookieHandler(w http.ResponseWriter, r *http.Request) { + value := map[string]string{ + "foo": "bar", + } + if encoded, err := s.Encode("cookie-name", value); err == nil { + cookie := &http.Cookie{ + Name: "cookie-name", + Value: encoded, + Path: "/", + } + http.SetCookie(w, cookie) + } + } + +Later, use the same SecureCookie instance to decode and validate a cookie +value: + + func ReadCookieHandler(w http.ResponseWriter, r *http.Request) { + if cookie, err := r.Cookie("cookie-name"); err == nil { + value := make(map[string]string) + if err = s2.Decode("cookie-name", cookie.Value, &value); err == nil { + fmt.Fprintf(w, "The value of foo is %q", value["foo"]) + } + } + } + +We stored a map[string]string, but secure cookies can hold any value that +can be encoded using encoding/gob. To store custom types, they must be +registered first using gob.Register(). For basic types this is not needed; +it works out of the box. +*/ +package securecookie diff --git a/vendor/github.com/gorilla/securecookie/securecookie.go b/vendor/github.com/gorilla/securecookie/securecookie.go new file mode 100644 index 0000000..4d5ea86 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/securecookie.go @@ -0,0 +1,649 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package securecookie + +import ( + "bytes" + "crypto/aes" + "crypto/cipher" + "crypto/hmac" + "crypto/rand" + "crypto/sha256" + "crypto/subtle" + "encoding/base64" + "encoding/gob" + "encoding/json" + "fmt" + "hash" + "io" + "strconv" + "strings" + "time" +) + +// Error is the interface of all errors returned by functions in this library. 
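+// Callers can type-assert a returned error to Error and use IsUsage, IsDecode
+// and IsInternal to decide whether to fix their configuration, reject the
+// cookie, or treat the failure as internal.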
+type Error interface { + error + + // IsUsage returns true for errors indicating the client code probably + // uses this library incorrectly. For example, the client may have + // failed to provide a valid hash key, or may have failed to configure + // the Serializer adequately for encoding value. + IsUsage() bool + + // IsDecode returns true for errors indicating that a cookie could not + // be decoded and validated. Since cookies are usually untrusted + // user-provided input, errors of this type should be expected. + // Usually, the proper action is simply to reject the request. + IsDecode() bool + + // IsInternal returns true for unexpected errors occurring in the + // securecookie implementation. + IsInternal() bool + + // Cause, if it returns a non-nil value, indicates that this error was + // propagated from some underlying library. If this method returns nil, + // this error was raised directly by this library. + // + // Cause is provided principally for debugging/logging purposes; it is + // rare that application logic should perform meaningfully different + // logic based on Cause. See, for example, the caveats described on + // (MultiError).Cause(). + Cause() error +} + +// errorType is a bitmask giving the error type(s) of an cookieError value. +type errorType int + +const ( + usageError = errorType(1 << iota) + decodeError + internalError +) + +type cookieError struct { + typ errorType + msg string + cause error +} + +func (e cookieError) IsUsage() bool { return (e.typ & usageError) != 0 } +func (e cookieError) IsDecode() bool { return (e.typ & decodeError) != 0 } +func (e cookieError) IsInternal() bool { return (e.typ & internalError) != 0 } + +func (e cookieError) Cause() error { return e.cause } + +func (e cookieError) Error() string { + parts := []string{"securecookie: "} + if e.msg == "" { + parts = append(parts, "error") + } else { + parts = append(parts, e.msg) + } + if c := e.Cause(); c != nil { + parts = append(parts, " - caused by: ", c.Error()) + } + return strings.Join(parts, "") +} + +var ( + errGeneratingIV = cookieError{typ: internalError, msg: "failed to generate random iv"} + + errNoCodecs = cookieError{typ: usageError, msg: "no codecs provided"} + errHashKeyNotSet = cookieError{typ: usageError, msg: "hash key is not set"} + errBlockKeyNotSet = cookieError{typ: usageError, msg: "block key is not set"} + errEncodedValueTooLong = cookieError{typ: usageError, msg: "the value is too long"} + + errValueToDecodeTooLong = cookieError{typ: decodeError, msg: "the value is too long"} + errTimestampInvalid = cookieError{typ: decodeError, msg: "invalid timestamp"} + errTimestampTooNew = cookieError{typ: decodeError, msg: "timestamp is too new"} + errTimestampExpired = cookieError{typ: decodeError, msg: "expired timestamp"} + errDecryptionFailed = cookieError{typ: decodeError, msg: "the value could not be decrypted"} + errValueNotByte = cookieError{typ: decodeError, msg: "value not a []byte."} + errValueNotBytePtr = cookieError{typ: decodeError, msg: "value not a pointer to []byte."} + + // ErrMacInvalid indicates that cookie decoding failed because the HMAC + // could not be extracted and verified. Direct use of this error + // variable is deprecated; it is public only for legacy compatibility, + // and may be privatized in the future, as it is rarely useful to + // distinguish between this error and other Error implementations. 
+ ErrMacInvalid = cookieError{typ: decodeError, msg: "the value is not valid"} +) + +// Codec defines an interface to encode and decode cookie values. +type Codec interface { + Encode(name string, value interface{}) (string, error) + Decode(name, value string, dst interface{}) error +} + +// New returns a new SecureCookie. +// +// hashKey is required, used to authenticate values using HMAC. Create it using +// GenerateRandomKey(). It is recommended to use a key with 32 or 64 bytes. +// +// blockKey is optional, used to encrypt values. Create it using +// GenerateRandomKey(). The key length must correspond to the key size +// of the encryption algorithm. For AES, used by default, valid lengths are +// 16, 24, or 32 bytes to select AES-128, AES-192, or AES-256. +// The default encoder used for cookie serialization is encoding/gob. +// +// Note that keys created using GenerateRandomKey() are not automatically +// persisted. New keys will be created when the application is restarted, and +// previously issued cookies will not be able to be decoded. +func New(hashKey, blockKey []byte) *SecureCookie { + s := &SecureCookie{ + hashKey: hashKey, + blockKey: blockKey, + hashFunc: sha256.New, + maxAge: 86400 * 30, + maxLength: 4096, + sz: GobEncoder{}, + } + if len(hashKey) == 0 { + s.err = errHashKeyNotSet + } + if blockKey != nil { + s.BlockFunc(aes.NewCipher) + } + return s +} + +// SecureCookie encodes and decodes authenticated and optionally encrypted +// cookie values. +type SecureCookie struct { + hashKey []byte + hashFunc func() hash.Hash + blockKey []byte + block cipher.Block + maxLength int + maxAge int64 + minAge int64 + err error + sz Serializer + // For testing purposes, the function that returns the current timestamp. + // If not set, it will use time.Now().UTC().Unix(). + timeFunc func() int64 +} + +// Serializer provides an interface for providing custom serializers for cookie +// values. +type Serializer interface { + Serialize(src interface{}) ([]byte, error) + Deserialize(src []byte, dst interface{}) error +} + +// GobEncoder encodes cookie values using encoding/gob. This is the simplest +// encoder and can handle complex types via gob.Register. +type GobEncoder struct{} + +// JSONEncoder encodes cookie values using encoding/json. Users who wish to +// encode complex types need to satisfy the json.Marshaller and +// json.Unmarshaller interfaces. +type JSONEncoder struct{} + +// NopEncoder does not encode cookie values, and instead simply accepts a []byte +// (as an interface{}) and returns a []byte. This is particularly useful when +// you encoding an object upstream and do not wish to re-encode it. +type NopEncoder struct{} + +// MaxLength restricts the maximum length, in bytes, for the cookie value. +// +// Default is 4096, which is the maximum value accepted by Internet Explorer. +func (s *SecureCookie) MaxLength(value int) *SecureCookie { + s.maxLength = value + return s +} + +// MaxAge restricts the maximum age, in seconds, for the cookie value. +// +// Default is 86400 * 30. Set it to 0 for no restriction. +func (s *SecureCookie) MaxAge(value int) *SecureCookie { + s.maxAge = int64(value) + return s +} + +// MinAge restricts the minimum age, in seconds, for the cookie value. +// +// Default is 0 (no restriction). +func (s *SecureCookie) MinAge(value int) *SecureCookie { + s.minAge = int64(value) + return s +} + +// HashFunc sets the hash function used to create HMAC. +// +// Default is crypto/sha256.New. 
+func (s *SecureCookie) HashFunc(f func() hash.Hash) *SecureCookie { + s.hashFunc = f + return s +} + +// BlockFunc sets the encryption function used to create a cipher.Block. +// +// Default is crypto/aes.New. +func (s *SecureCookie) BlockFunc(f func([]byte) (cipher.Block, error)) *SecureCookie { + if s.blockKey == nil { + s.err = errBlockKeyNotSet + } else if block, err := f(s.blockKey); err == nil { + s.block = block + } else { + s.err = cookieError{cause: err, typ: usageError} + } + return s +} + +// Encoding sets the encoding/serialization method for cookies. +// +// Default is encoding/gob. To encode special structures using encoding/gob, +// they must be registered first using gob.Register(). +func (s *SecureCookie) SetSerializer(sz Serializer) *SecureCookie { + s.sz = sz + + return s +} + +// Encode encodes a cookie value. +// +// It serializes, optionally encrypts, signs with a message authentication code, +// and finally encodes the value. +// +// The name argument is the cookie name. It is stored with the encoded value. +// The value argument is the value to be encoded. It can be any value that can +// be encoded using the currently selected serializer; see SetSerializer(). +// +// It is the client's responsibility to ensure that value, when encoded using +// the current serialization/encryption settings on s and then base64-encoded, +// is shorter than the maximum permissible length. +func (s *SecureCookie) Encode(name string, value interface{}) (string, error) { + if s.err != nil { + return "", s.err + } + if s.hashKey == nil { + s.err = errHashKeyNotSet + return "", s.err + } + var err error + var b []byte + // 1. Serialize. + if b, err = s.sz.Serialize(value); err != nil { + return "", cookieError{cause: err, typ: usageError} + } + // 2. Encrypt (optional). + if s.block != nil { + if b, err = encrypt(s.block, b); err != nil { + return "", cookieError{cause: err, typ: usageError} + } + } + b = encode(b) + // 3. Create MAC for "name|date|value". Extra pipe to be used later. + b = []byte(fmt.Sprintf("%s|%d|%s|", name, s.timestamp(), b)) + mac := createMac(hmac.New(s.hashFunc, s.hashKey), b[:len(b)-1]) + // Append mac, remove name. + b = append(b, mac...)[len(name)+1:] + // 4. Encode to base64. + b = encode(b) + // 5. Check length. + if s.maxLength != 0 && len(b) > s.maxLength { + return "", fmt.Errorf("%s: %d", errEncodedValueTooLong, len(b)) + } + // Done. + return string(b), nil +} + +// Decode decodes a cookie value. +// +// It decodes, verifies a message authentication code, optionally decrypts and +// finally deserializes the value. +// +// The name argument is the cookie name. It must be the same name used when +// it was stored. The value argument is the encoded cookie value. The dst +// argument is where the cookie will be decoded. It must be a pointer. +func (s *SecureCookie) Decode(name, value string, dst interface{}) error { + if s.err != nil { + return s.err + } + if s.hashKey == nil { + s.err = errHashKeyNotSet + return s.err + } + // 1. Check length. + if s.maxLength != 0 && len(value) > s.maxLength { + return fmt.Errorf("%s: %d", errValueToDecodeTooLong, len(value)) + } + // 2. Decode from base64. + b, err := decode([]byte(value)) + if err != nil { + return err + } + // 3. Verify MAC. Value is "date|value|mac". + parts := bytes.SplitN(b, []byte("|"), 3) + if len(parts) != 3 { + return ErrMacInvalid + } + h := hmac.New(s.hashFunc, s.hashKey) + b = append([]byte(name+"|"), b[:len(b)-len(parts[2])-1]...) 
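+	// b now holds "name|date|value", the same byte string that was signed in
+	// Encode, so the MAC in parts[2] can be verified against it.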
+ if err = verifyMac(h, b, parts[2]); err != nil { + return err + } + // 4. Verify date ranges. + var t1 int64 + if t1, err = strconv.ParseInt(string(parts[0]), 10, 64); err != nil { + return errTimestampInvalid + } + t2 := s.timestamp() + if s.minAge != 0 && t1 > t2-s.minAge { + return errTimestampTooNew + } + if s.maxAge != 0 && t1 < t2-s.maxAge { + return errTimestampExpired + } + // 5. Decrypt (optional). + b, err = decode(parts[1]) + if err != nil { + return err + } + if s.block != nil { + if b, err = decrypt(s.block, b); err != nil { + return err + } + } + // 6. Deserialize. + if err = s.sz.Deserialize(b, dst); err != nil { + return cookieError{cause: err, typ: decodeError} + } + // Done. + return nil +} + +// timestamp returns the current timestamp, in seconds. +// +// For testing purposes, the function that generates the timestamp can be +// overridden. If not set, it will return time.Now().UTC().Unix(). +func (s *SecureCookie) timestamp() int64 { + if s.timeFunc == nil { + return time.Now().UTC().Unix() + } + return s.timeFunc() +} + +// Authentication ------------------------------------------------------------- + +// createMac creates a message authentication code (MAC). +func createMac(h hash.Hash, value []byte) []byte { + h.Write(value) + return h.Sum(nil) +} + +// verifyMac verifies that a message authentication code (MAC) is valid. +func verifyMac(h hash.Hash, value []byte, mac []byte) error { + mac2 := createMac(h, value) + // Check that both MACs are of equal length, as subtle.ConstantTimeCompare + // does not do this prior to Go 1.4. + if len(mac) == len(mac2) && subtle.ConstantTimeCompare(mac, mac2) == 1 { + return nil + } + return ErrMacInvalid +} + +// Encryption ----------------------------------------------------------------- + +// encrypt encrypts a value using the given block in counter mode. +// +// A random initialization vector ( https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#Initialization_vector_(IV) ) with the length of the +// block size is prepended to the resulting ciphertext. +func encrypt(block cipher.Block, value []byte) ([]byte, error) { + iv := GenerateRandomKey(block.BlockSize()) + if iv == nil { + return nil, errGeneratingIV + } + // Encrypt it. + stream := cipher.NewCTR(block, iv) + stream.XORKeyStream(value, value) + // Return iv + ciphertext. + return append(iv, value...), nil +} + +// decrypt decrypts a value using the given block in counter mode. +// +// The value to be decrypted must be prepended by a initialization vector +// ( https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#Initialization_vector_(IV) ) with the length of the block size. +func decrypt(block cipher.Block, value []byte) ([]byte, error) { + size := block.BlockSize() + if len(value) > size { + // Extract iv. + iv := value[:size] + // Extract ciphertext. + value = value[size:] + // Decrypt it. + stream := cipher.NewCTR(block, iv) + stream.XORKeyStream(value, value) + return value, nil + } + return nil, errDecryptionFailed +} + +// Serialization -------------------------------------------------------------- + +// Serialize encodes a value using gob. +func (e GobEncoder) Serialize(src interface{}) ([]byte, error) { + buf := new(bytes.Buffer) + enc := gob.NewEncoder(buf) + if err := enc.Encode(src); err != nil { + return nil, cookieError{cause: err, typ: usageError} + } + return buf.Bytes(), nil +} + +// Deserialize decodes a value using gob. 
+func (e GobEncoder) Deserialize(src []byte, dst interface{}) error { + dec := gob.NewDecoder(bytes.NewBuffer(src)) + if err := dec.Decode(dst); err != nil { + return cookieError{cause: err, typ: decodeError} + } + return nil +} + +// Serialize encodes a value using encoding/json. +func (e JSONEncoder) Serialize(src interface{}) ([]byte, error) { + buf := new(bytes.Buffer) + enc := json.NewEncoder(buf) + if err := enc.Encode(src); err != nil { + return nil, cookieError{cause: err, typ: usageError} + } + return buf.Bytes(), nil +} + +// Deserialize decodes a value using encoding/json. +func (e JSONEncoder) Deserialize(src []byte, dst interface{}) error { + dec := json.NewDecoder(bytes.NewReader(src)) + if err := dec.Decode(dst); err != nil { + return cookieError{cause: err, typ: decodeError} + } + return nil +} + +// Serialize passes a []byte through as-is. +func (e NopEncoder) Serialize(src interface{}) ([]byte, error) { + if b, ok := src.([]byte); ok { + return b, nil + } + + return nil, errValueNotByte +} + +// Deserialize passes a []byte through as-is. +func (e NopEncoder) Deserialize(src []byte, dst interface{}) error { + if dat, ok := dst.(*[]byte); ok { + *dat = src + return nil + } + return errValueNotBytePtr +} + +// Encoding ------------------------------------------------------------------- + +// encode encodes a value using base64. +func encode(value []byte) []byte { + encoded := make([]byte, base64.URLEncoding.EncodedLen(len(value))) + base64.URLEncoding.Encode(encoded, value) + return encoded +} + +// decode decodes a cookie using base64. +func decode(value []byte) ([]byte, error) { + decoded := make([]byte, base64.URLEncoding.DecodedLen(len(value))) + b, err := base64.URLEncoding.Decode(decoded, value) + if err != nil { + return nil, cookieError{cause: err, typ: decodeError, msg: "base64 decode failed"} + } + return decoded[:b], nil +} + +// Helpers -------------------------------------------------------------------- + +// GenerateRandomKey creates a random key with the given length in bytes. +// On failure, returns nil. +// +// Note that keys created using `GenerateRandomKey()` are not automatically +// persisted. New keys will be created when the application is restarted, and +// previously issued cookies will not be able to be decoded. +// +// Callers should explicitly check for the possibility of a nil return, treat +// it as a failure of the system random number generator, and not continue. +func GenerateRandomKey(length int) []byte { + k := make([]byte, length) + if _, err := io.ReadFull(rand.Reader, k); err != nil { + return nil + } + return k +} + +// CodecsFromPairs returns a slice of SecureCookie instances. +// +// It is a convenience function to create a list of codecs for key rotation. Note +// that the generated Codecs will have the default options applied: callers +// should iterate over each Codec and type-assert the underlying *SecureCookie to +// change these. +// +// Example: +// +// codecs := securecookie.CodecsFromPairs( +// []byte("new-hash-key"), +// []byte("new-block-key"), +// []byte("old-hash-key"), +// []byte("old-block-key"), +// ) +// +// // Modify each instance. 
+// for _, s := range codecs { +// if cookie, ok := s.(*securecookie.SecureCookie); ok { +// cookie.MaxAge(86400 * 7) +// cookie.SetSerializer(securecookie.JSONEncoder{}) +// cookie.HashFunc(sha512.New512_256) +// } +// } +func CodecsFromPairs(keyPairs ...[]byte) []Codec { + codecs := make([]Codec, len(keyPairs)/2+len(keyPairs)%2) + for i := 0; i < len(keyPairs); i += 2 { + var blockKey []byte + if i+1 < len(keyPairs) { + blockKey = keyPairs[i+1] + } + codecs[i/2] = New(keyPairs[i], blockKey) + } + return codecs +} + +// EncodeMulti encodes a cookie value using a group of codecs. +// +// The codecs are tried in order. Multiple codecs are accepted to allow +// key rotation. +// +// On error, may return a MultiError. +func EncodeMulti(name string, value interface{}, codecs ...Codec) (string, error) { + if len(codecs) == 0 { + return "", errNoCodecs + } + + var errors MultiError + for _, codec := range codecs { + encoded, err := codec.Encode(name, value) + if err == nil { + return encoded, nil + } + errors = append(errors, err) + } + return "", errors +} + +// DecodeMulti decodes a cookie value using a group of codecs. +// +// The codecs are tried in order. Multiple codecs are accepted to allow +// key rotation. +// +// On error, may return a MultiError. +func DecodeMulti(name string, value string, dst interface{}, codecs ...Codec) error { + if len(codecs) == 0 { + return errNoCodecs + } + + var errors MultiError + for _, codec := range codecs { + err := codec.Decode(name, value, dst) + if err == nil { + return nil + } + errors = append(errors, err) + } + return errors +} + +// MultiError groups multiple errors. +type MultiError []error + +func (m MultiError) IsUsage() bool { return m.any(func(e Error) bool { return e.IsUsage() }) } +func (m MultiError) IsDecode() bool { return m.any(func(e Error) bool { return e.IsDecode() }) } +func (m MultiError) IsInternal() bool { return m.any(func(e Error) bool { return e.IsInternal() }) } + +// Cause returns nil for MultiError; there is no unique underlying cause in the +// general case. +// +// Note: we could conceivably return a non-nil Cause only when there is exactly +// one child error with a Cause. However, it would be brittle for client code +// to rely on the arity of causes inside a MultiError, so we have opted not to +// provide this functionality. Clients which really wish to access the Causes +// of the underlying errors are free to iterate through the errors themselves. +func (m MultiError) Cause() error { return nil } + +func (m MultiError) Error() string { + s, n := "", 0 + for _, e := range m { + if e != nil { + if n == 0 { + s = e.Error() + } + n++ + } + } + switch n { + case 0: + return "(0 errors)" + case 1: + return s + case 2: + return s + " (and 1 other error)" + } + return fmt.Sprintf("%s (and %d other errors)", s, n-1) +} + +// any returns true if any element of m is an Error for which pred returns true. 
+func (m MultiError) any(pred func(Error) bool) bool { + for _, e := range m { + if ourErr, ok := e.(Error); ok && pred(ourErr) { + return true + } + } + return false +} diff --git a/vendor/github.com/gorilla/sessions/.editorconfig b/vendor/github.com/gorilla/sessions/.editorconfig new file mode 100644 index 0000000..2940ec9 --- /dev/null +++ b/vendor/github.com/gorilla/sessions/.editorconfig @@ -0,0 +1,20 @@ +; https://editorconfig.org/ + +root = true + +[*] +insert_final_newline = true +charset = utf-8 +trim_trailing_whitespace = true +indent_style = space +indent_size = 2 + +[{Makefile,go.mod,go.sum,*.go,.gitmodules}] +indent_style = tab +indent_size = 4 + +[*.md] +indent_size = 4 +trim_trailing_whitespace = false + +eclint_indent_style = unset diff --git a/vendor/github.com/gorilla/sessions/.gitignore b/vendor/github.com/gorilla/sessions/.gitignore new file mode 100644 index 0000000..84039fe --- /dev/null +++ b/vendor/github.com/gorilla/sessions/.gitignore @@ -0,0 +1 @@ +coverage.coverprofile diff --git a/vendor/github.com/gorilla/sessions/LICENSE b/vendor/github.com/gorilla/sessions/LICENSE new file mode 100644 index 0000000..bb9d80b --- /dev/null +++ b/vendor/github.com/gorilla/sessions/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2023 The Gorilla Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
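The securecookie helpers above (CodecsFromPairs, EncodeMulti, DecodeMulti, GenerateRandomKey) are the pieces the sessions package later in this patch builds its key rotation on. Below is a minimal, self-contained usage sketch, not part of the vendored sources: the payload, key sizes, and variable names are illustrative, and it assumes the package's New constructor (referenced by CodecsFromPairs) and the default gob serializer.

```go
package main

import (
	"fmt"

	"github.com/gorilla/securecookie"
)

func main() {
	// Key pairs for rotation: the newest (hash key, block key) pair comes
	// first; older pairs are only tried when decoding.
	// NOTE: random keys are for illustration only; real keys must be
	// persisted, or cookies cannot be decoded after a restart (see
	// GenerateRandomKey above).
	codecs := securecookie.CodecsFromPairs(
		securecookie.GenerateRandomKey(64), // new hash key
		securecookie.GenerateRandomKey(32), // new block key (AES-256)
		securecookie.GenerateRandomKey(64), // old hash key
		nil,                                // old pair without encryption
	)

	value := map[string]string{"user": "42"}

	// EncodeMulti signs (and encrypts) with the first codec that succeeds.
	encoded, err := securecookie.EncodeMulti("session", value, codecs...)
	if err != nil {
		panic(err)
	}

	// DecodeMulti tries each codec in order, which is what lets cookies
	// written with the old keys keep decoding during a rotation window.
	decoded := map[string]string{}
	if err := securecookie.DecodeMulti("session", encoded, &decoded, codecs...); err != nil {
		panic(err)
	}
	fmt.Println(decoded["user"])
}
```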
diff --git a/vendor/github.com/gorilla/sessions/Makefile b/vendor/github.com/gorilla/sessions/Makefile new file mode 100644 index 0000000..ac37ffd --- /dev/null +++ b/vendor/github.com/gorilla/sessions/Makefile @@ -0,0 +1,34 @@ +GO_LINT=$(shell which golangci-lint 2> /dev/null || echo '') +GO_LINT_URI=github.com/golangci/golangci-lint/cmd/golangci-lint@latest + +GO_SEC=$(shell which gosec 2> /dev/null || echo '') +GO_SEC_URI=github.com/securego/gosec/v2/cmd/gosec@latest + +GO_VULNCHECK=$(shell which govulncheck 2> /dev/null || echo '') +GO_VULNCHECK_URI=golang.org/x/vuln/cmd/govulncheck@latest + +.PHONY: golangci-lint +golangci-lint: + $(if $(GO_LINT), ,go install $(GO_LINT_URI)) + @echo "##### Running golangci-lint" + golangci-lint run -v + +.PHONY: gosec +gosec: + $(if $(GO_SEC), ,go install $(GO_SEC_URI)) + @echo "##### Running gosec" + gosec ./... + +.PHONY: govulncheck +govulncheck: + $(if $(GO_VULNCHECK), ,go install $(GO_VULNCHECK_URI)) + @echo "##### Running govulncheck" + govulncheck ./... + +.PHONY: verify +verify: golangci-lint gosec govulncheck + +.PHONY: test +test: + @echo "##### Running tests" + go test -race -cover -coverprofile=coverage.coverprofile -covermode=atomic -v ./... diff --git a/vendor/github.com/gorilla/sessions/README.md b/vendor/github.com/gorilla/sessions/README.md new file mode 100644 index 0000000..3aef6a4 --- /dev/null +++ b/vendor/github.com/gorilla/sessions/README.md @@ -0,0 +1,94 @@ +# sessions + +![testing](https://github.com/gorilla/sessions/actions/workflows/test.yml/badge.svg) +[![codecov](https://codecov.io/github/gorilla/sessions/branch/main/graph/badge.svg)](https://codecov.io/github/gorilla/sessions) +[![godoc](https://godoc.org/github.com/gorilla/sessions?status.svg)](https://godoc.org/github.com/gorilla/sessions) +[![sourcegraph](https://sourcegraph.com/github.com/gorilla/sessions/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/sessions?badge) + +![Gorilla Logo](https://github.com/gorilla/.github/assets/53367916/d92caabf-98e0-473e-bfbf-ab554ba435e5) + +gorilla/sessions provides cookie and filesystem sessions and infrastructure for +custom session backends. + +The key features are: + +- Simple API: use it as an easy way to set signed (and optionally + encrypted) cookies. +- Built-in backends to store sessions in cookies or the filesystem. +- Flash messages: session values that last until read. +- Convenient way to switch session persistency (aka "remember me") and set + other attributes. +- Mechanism to rotate authentication and encryption keys. +- Multiple sessions per request, even using different backends. +- Interfaces and infrastructure for custom session backends: sessions from + different stores can be retrieved and batch-saved using a common API. + +Let's start with an example that shows the sessions API in a nutshell: + +```go + import ( + "net/http" + "github.com/gorilla/sessions" + ) + + // Note: Don't store your key in your source code. Pass it via an + // environmental variable, or flag (or both), and don't accidentally commit it + // alongside your code. Ensure your key is sufficiently random - i.e. use Go's + // crypto/rand or securecookie.GenerateRandomKey(32) and persist the result. + var store = sessions.NewCookieStore([]byte(os.Getenv("SESSION_KEY"))) + + func MyHandler(w http.ResponseWriter, r *http.Request) { + // Get a session. We're ignoring the error resulted from decoding an + // existing session: Get() always returns a session, even if empty. 
+ session, _ := store.Get(r, "session-name") + // Set some session values. + session.Values["foo"] = "bar" + session.Values[42] = 43 + // Save it before we write to the response/return from the handler. + err := session.Save(r, w) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + } +``` + +First we initialize a session store calling `NewCookieStore()` and passing a +secret key used to authenticate the session. Inside the handler, we call +`store.Get()` to retrieve an existing session or create a new one. Then we set +some session values in session.Values, which is a `map[interface{}]interface{}`. +And finally we call `session.Save()` to save the session in the response. + +More examples are available at [package documentation](https://pkg.go.dev/github.com/gorilla/sessions). + +## Store Implementations + +Other implementations of the `sessions.Store` interface: + +- [github.com/starJammer/gorilla-sessions-arangodb](https://github.com/starJammer/gorilla-sessions-arangodb) - ArangoDB +- [github.com/yosssi/boltstore](https://github.com/yosssi/boltstore) - Bolt +- [github.com/srinathgs/couchbasestore](https://github.com/srinathgs/couchbasestore) - Couchbase +- [github.com/denizeren/dynamostore](https://github.com/denizeren/dynamostore) - Dynamodb on AWS +- [github.com/savaki/dynastore](https://github.com/savaki/dynastore) - DynamoDB on AWS (Official AWS library) +- [github.com/bradleypeabody/gorilla-sessions-memcache](https://github.com/bradleypeabody/gorilla-sessions-memcache) - Memcache +- [github.com/dsoprea/go-appengine-sessioncascade](https://github.com/dsoprea/go-appengine-sessioncascade) - Memcache/Datastore/Context in AppEngine +- [github.com/kidstuff/mongostore](https://github.com/kidstuff/mongostore) - MongoDB +- [github.com/srinathgs/mysqlstore](https://github.com/srinathgs/mysqlstore) - MySQL +- [github.com/EnumApps/clustersqlstore](https://github.com/EnumApps/clustersqlstore) - MySQL Cluster +- [github.com/antonlindstrom/pgstore](https://github.com/antonlindstrom/pgstore) - PostgreSQL +- [github.com/boj/redistore](https://github.com/boj/redistore) - Redis +- [github.com/rbcervilla/redisstore](https://github.com/rbcervilla/redisstore) - Redis (Single, Sentinel, Cluster) +- [github.com/boj/rethinkstore](https://github.com/boj/rethinkstore) - RethinkDB +- [github.com/boj/riakstore](https://github.com/boj/riakstore) - Riak +- [github.com/michaeljs1990/sqlitestore](https://github.com/michaeljs1990/sqlitestore) - SQLite +- [github.com/wader/gormstore](https://github.com/wader/gormstore) - GORM (MySQL, PostgreSQL, SQLite) +- [github.com/gernest/qlstore](https://github.com/gernest/qlstore) - ql +- [github.com/quasoft/memstore](https://github.com/quasoft/memstore) - In-memory implementation for use in unit tests +- [github.com/lafriks/xormstore](https://github.com/lafriks/xormstore) - XORM (MySQL, PostgreSQL, SQLite, Microsoft SQL Server, TiDB) +- [github.com/GoogleCloudPlatform/firestore-gorilla-sessions](https://github.com/GoogleCloudPlatform/firestore-gorilla-sessions) - Cloud Firestore +- [github.com/stephenafamo/crdbstore](https://github.com/stephenafamo/crdbstore) - CockroachDB +- [github.com/ryicoh/tikvstore](github.com/ryicoh/tikvstore) - TiKV + +## License + +BSD licensed. See the LICENSE file for details. 
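The README's feature list mentions key rotation and flash messages but only shows the basic set/save flow. A short sketch combining both follows, under stated assumptions: it relies on the CookieStore, Flashes, and AddFlash defined later in this patch, and the handler name and environment variable names are placeholders, not part of the vendored code.

```go
package main

import (
	"fmt"
	"net/http"
	"os"

	"github.com/gorilla/sessions"
)

// Keys come in (authentication, encryption) pairs, newest pair first; a nil
// encryption key means "sign only". The env var names are placeholders.
var store = sessions.NewCookieStore(
	[]byte(os.Getenv("SESSION_AUTH_KEY_NEW")), nil,
	[]byte(os.Getenv("SESSION_AUTH_KEY_OLD")), nil,
)

func flashHandler(w http.ResponseWriter, r *http.Request) {
	session, _ := store.Get(r, "session-name")
	flashes := session.Flashes() // reading removes flashes from the session
	if len(flashes) == 0 {
		session.AddFlash("Hello, flash messages world!")
	}
	// Save before writing to the response, or the cookie is not sent.
	if err := session.Save(r, w); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	fmt.Fprintf(w, "flashes: %v\n", flashes)
}

func main() {
	http.HandleFunc("/", flashHandler)
	_ = http.ListenAndServe(":8080", nil)
}
```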
diff --git a/vendor/github.com/gorilla/sessions/cookie.go b/vendor/github.com/gorilla/sessions/cookie.go new file mode 100644 index 0000000..6612662 --- /dev/null +++ b/vendor/github.com/gorilla/sessions/cookie.go @@ -0,0 +1,20 @@ +//go:build !go1.11 +// +build !go1.11 + +package sessions + +import "net/http" + +// newCookieFromOptions returns an http.Cookie with the options set. +func newCookieFromOptions(name, value string, options *Options) *http.Cookie { + return &http.Cookie{ + Name: name, + Value: value, + Path: options.Path, + Domain: options.Domain, + MaxAge: options.MaxAge, + Secure: options.Secure, + HttpOnly: options.HttpOnly, + } + +} diff --git a/vendor/github.com/gorilla/sessions/cookie_go111.go b/vendor/github.com/gorilla/sessions/cookie_go111.go new file mode 100644 index 0000000..9b58828 --- /dev/null +++ b/vendor/github.com/gorilla/sessions/cookie_go111.go @@ -0,0 +1,21 @@ +//go:build go1.11 +// +build go1.11 + +package sessions + +import "net/http" + +// newCookieFromOptions returns an http.Cookie with the options set. +func newCookieFromOptions(name, value string, options *Options) *http.Cookie { + return &http.Cookie{ + Name: name, + Value: value, + Path: options.Path, + Domain: options.Domain, + MaxAge: options.MaxAge, + Secure: options.Secure, + HttpOnly: options.HttpOnly, + SameSite: options.SameSite, + } + +} diff --git a/vendor/github.com/gorilla/sessions/doc.go b/vendor/github.com/gorilla/sessions/doc.go new file mode 100644 index 0000000..946bf5c --- /dev/null +++ b/vendor/github.com/gorilla/sessions/doc.go @@ -0,0 +1,207 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package sessions provides cookie and filesystem sessions and +infrastructure for custom session backends. + +The key features are: + + * Simple API: use it as an easy way to set signed (and optionally + encrypted) cookies. + * Built-in backends to store sessions in cookies or the filesystem. + * Flash messages: session values that last until read. + * Convenient way to switch session persistency (aka "remember me") and set + other attributes. + * Mechanism to rotate authentication and encryption keys. + * Multiple sessions per request, even using different backends. + * Interfaces and infrastructure for custom session backends: sessions from + different stores can be retrieved and batch-saved using a common API. + +Let's start with an example that shows the sessions API in a nutshell: + + import ( + "net/http" + "github.com/gorilla/sessions" + ) + + // Note: Don't store your key in your source code. Pass it via an + // environmental variable, or flag (or both), and don't accidentally commit it + // alongside your code. Ensure your key is sufficiently random - i.e. use Go's + // crypto/rand or securecookie.GenerateRandomKey(32) and persist the result. + // Ensure SESSION_KEY exists in the environment, or sessions will fail. + var store = sessions.NewCookieStore([]byte(os.Getenv("SESSION_KEY"))) + + func MyHandler(w http.ResponseWriter, r *http.Request) { + // Get a session. Get() always returns a session, even if empty. + session, err := store.Get(r, "session-name") + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + // Set some session values. + session.Values["foo"] = "bar" + session.Values[42] = 43 + // Save it before we write to the response/return from the handler. 
+ err = session.Save(r, w) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + } + +First we initialize a session store calling NewCookieStore() and passing a +secret key used to authenticate the session. Inside the handler, we call +store.Get() to retrieve an existing session or a new one. Then we set some +session values in session.Values, which is a map[interface{}]interface{}. +And finally we call session.Save() to save the session in the response. + +Note that in production code, we should check for errors when calling +session.Save(r, w), and either display an error message or otherwise handle it. + +Save must be called before writing to the response, otherwise the session +cookie will not be sent to the client. + +That's all you need to know for the basic usage. Let's take a look at other +options, starting with flash messages. + +Flash messages are session values that last until read. The term appeared with +Ruby On Rails a few years back. When we request a flash message, it is removed +from the session. To add a flash, call session.AddFlash(), and to get all +flashes, call session.Flashes(). Here is an example: + + func MyHandler(w http.ResponseWriter, r *http.Request) { + // Get a session. + session, err := store.Get(r, "session-name") + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + // Get the previous flashes, if any. + if flashes := session.Flashes(); len(flashes) > 0 { + // Use the flash values. + } else { + // Set a new flash. + session.AddFlash("Hello, flash messages world!") + } + err = session.Save(r, w) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + } + +Flash messages are useful to set information to be read after a redirection, +like after form submissions. + +There may also be cases where you want to store a complex datatype within a +session, such as a struct. Sessions are serialised using the encoding/gob package, +so it is easy to register new datatypes for storage in sessions: + + import( + "encoding/gob" + "github.com/gorilla/sessions" + ) + + type Person struct { + FirstName string + LastName string + Email string + Age int + } + + type M map[string]interface{} + + func init() { + + gob.Register(&Person{}) + gob.Register(&M{}) + } + +As it's not possible to pass a raw type as a parameter to a function, gob.Register() +relies on us passing it a value of the desired type. In the example above we've passed +it a pointer to a struct and a pointer to a custom type representing a +map[string]interface. (We could have passed non-pointer values if we wished.) This will +then allow us to serialise/deserialise values of those types to and from our sessions. + +Note that because session values are stored in a map[string]interface{}, there's +a need to type-assert data when retrieving it. We'll use the Person struct we registered above: + + func MyHandler(w http.ResponseWriter, r *http.Request) { + session, err := store.Get(r, "session-name") + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + // Retrieve our struct and type-assert it + val := session.Values["person"] + var person = &Person{} + if person, ok := val.(*Person); !ok { + // Handle the case that it's not an expected type + } + + // Now we can use our person object + } + +By default, session cookies last for a month. This is probably too long for +some cases, but it is easy to change this and other attributes during +runtime. 
Sessions can be configured individually or the store can be
+configured and then all sessions saved using it will use that configuration.
+We access session.Options or store.Options to set a new configuration. The
+fields are basically a subset of http.Cookie fields. Let's change the
+maximum age of a session to one week:
+
+    session.Options = &sessions.Options{
+        Path:     "/",
+        MaxAge:   86400 * 7,
+        HttpOnly: true,
+    }
+
+Sometimes we may want to change authentication and/or encryption keys without
+breaking existing sessions. The CookieStore supports key rotation: to use it,
+set multiple authentication and encryption keys, in pairs, to be tested in
+order:
+
+    var store = sessions.NewCookieStore(
+        []byte("new-authentication-key"),
+        []byte("new-encryption-key"),
+        []byte("old-authentication-key"),
+        []byte("old-encryption-key"),
+    )
+
+New sessions will be saved using the first pair. Old sessions can still be
+read because the first pair will fail, and the second will be tested. This
+makes it easy to "rotate" secret keys and still be able to validate existing
+sessions. Note: for all pairs the encryption key is optional; set it to nil
+or omit it and encryption won't be used.
+
+Multiple sessions can be used in the same request, even with different
+session backends. When this happens, calling Save() on each session
+individually would be cumbersome, so we have a way to save all sessions
+at once: sessions.Save(). Here's an example:
+
+    var store = sessions.NewCookieStore([]byte("something-very-secret"))
+
+    func MyHandler(w http.ResponseWriter, r *http.Request) {
+        // Get a session and set a value.
+        session1, _ := store.Get(r, "session-one")
+        session1.Values["foo"] = "bar"
+        // Get another session and set another value.
+        session2, _ := store.Get(r, "session-two")
+        session2.Values[42] = 43
+        // Save all sessions.
+        err := sessions.Save(r, w)
+        if err != nil {
+            http.Error(w, err.Error(), http.StatusInternalServerError)
+            return
+        }
+    }
+
+This is possible because when we call Get() from a session store, it adds the
+session to a common registry. Save() uses it to save all registered sessions.
+*/ +package sessions diff --git a/vendor/github.com/gorilla/sessions/lex.go b/vendor/github.com/gorilla/sessions/lex.go new file mode 100644 index 0000000..4bbbe10 --- /dev/null +++ b/vendor/github.com/gorilla/sessions/lex.go @@ -0,0 +1,102 @@ +// This file contains code adapted from the Go standard library +// https://github.com/golang/go/blob/39ad0fd0789872f9469167be7fe9578625ff246e/src/net/http/lex.go + +package sessions + +import "strings" + +var isTokenTable = [127]bool{ + '!': true, + '#': true, + '$': true, + '%': true, + '&': true, + '\'': true, + '*': true, + '+': true, + '-': true, + '.': true, + '0': true, + '1': true, + '2': true, + '3': true, + '4': true, + '5': true, + '6': true, + '7': true, + '8': true, + '9': true, + 'A': true, + 'B': true, + 'C': true, + 'D': true, + 'E': true, + 'F': true, + 'G': true, + 'H': true, + 'I': true, + 'J': true, + 'K': true, + 'L': true, + 'M': true, + 'N': true, + 'O': true, + 'P': true, + 'Q': true, + 'R': true, + 'S': true, + 'T': true, + 'U': true, + 'W': true, + 'V': true, + 'X': true, + 'Y': true, + 'Z': true, + '^': true, + '_': true, + '`': true, + 'a': true, + 'b': true, + 'c': true, + 'd': true, + 'e': true, + 'f': true, + 'g': true, + 'h': true, + 'i': true, + 'j': true, + 'k': true, + 'l': true, + 'm': true, + 'n': true, + 'o': true, + 'p': true, + 'q': true, + 'r': true, + 's': true, + 't': true, + 'u': true, + 'v': true, + 'w': true, + 'x': true, + 'y': true, + 'z': true, + '|': true, + '~': true, +} + +func isToken(r rune) bool { + i := int(r) + return i < len(isTokenTable) && isTokenTable[i] +} + +func isNotToken(r rune) bool { + return !isToken(r) +} + +func isCookieNameValid(raw string) bool { + if raw == "" { + return false + } + return strings.IndexFunc(raw, isNotToken) < 0 +} diff --git a/vendor/github.com/gorilla/sessions/options.go b/vendor/github.com/gorilla/sessions/options.go new file mode 100644 index 0000000..d33d076 --- /dev/null +++ b/vendor/github.com/gorilla/sessions/options.go @@ -0,0 +1,19 @@ +//go:build !go1.11 +// +build !go1.11 + +package sessions + +// Options stores configuration for a session or session store. +// +// Fields are a subset of http.Cookie fields. +type Options struct { + Path string + Domain string + // MaxAge=0 means no Max-Age attribute specified and the cookie will be + // deleted after the browser session ends. + // MaxAge<0 means delete cookie immediately. + // MaxAge>0 means Max-Age attribute present and given in seconds. + MaxAge int + Secure bool + HttpOnly bool +} diff --git a/vendor/github.com/gorilla/sessions/options_go111.go b/vendor/github.com/gorilla/sessions/options_go111.go new file mode 100644 index 0000000..af9cdf0 --- /dev/null +++ b/vendor/github.com/gorilla/sessions/options_go111.go @@ -0,0 +1,23 @@ +//go:build go1.11 +// +build go1.11 + +package sessions + +import "net/http" + +// Options stores configuration for a session or session store. +// +// Fields are a subset of http.Cookie fields. +type Options struct { + Path string + Domain string + // MaxAge=0 means no Max-Age attribute specified and the cookie will be + // deleted after the browser session ends. + // MaxAge<0 means delete cookie immediately. + // MaxAge>0 means Max-Age attribute present and given in seconds. 
+ MaxAge int + Secure bool + HttpOnly bool + // Defaults to http.SameSiteDefaultMode + SameSite http.SameSite +} diff --git a/vendor/github.com/gorilla/sessions/sessions.go b/vendor/github.com/gorilla/sessions/sessions.go new file mode 100644 index 0000000..c052b28 --- /dev/null +++ b/vendor/github.com/gorilla/sessions/sessions.go @@ -0,0 +1,218 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sessions + +import ( + "context" + "encoding/gob" + "fmt" + "net/http" + "time" +) + +// Default flashes key. +const flashesKey = "_flash" + +// Session -------------------------------------------------------------------- + +// NewSession is called by session stores to create a new session instance. +func NewSession(store Store, name string) *Session { + return &Session{ + Values: make(map[interface{}]interface{}), + store: store, + name: name, + Options: new(Options), + } +} + +// Session stores the values and optional configuration for a session. +type Session struct { + // The ID of the session, generated by stores. It should not be used for + // user data. + ID string + // Values contains the user-data for the session. + Values map[interface{}]interface{} + Options *Options + IsNew bool + store Store + name string +} + +// Flashes returns a slice of flash messages from the session. +// +// A single variadic argument is accepted, and it is optional: it defines +// the flash key. If not defined "_flash" is used by default. +func (s *Session) Flashes(vars ...string) []interface{} { + var flashes []interface{} + key := flashesKey + if len(vars) > 0 { + key = vars[0] + } + if v, ok := s.Values[key]; ok { + // Drop the flashes and return it. + delete(s.Values, key) + flashes = v.([]interface{}) + } + return flashes +} + +// AddFlash adds a flash message to the session. +// +// A single variadic argument is accepted, and it is optional: it defines +// the flash key. If not defined "_flash" is used by default. +func (s *Session) AddFlash(value interface{}, vars ...string) { + key := flashesKey + if len(vars) > 0 { + key = vars[0] + } + var flashes []interface{} + if v, ok := s.Values[key]; ok { + flashes = v.([]interface{}) + } + s.Values[key] = append(flashes, value) +} + +// Save is a convenience method to save this session. It is the same as calling +// store.Save(request, response, session). You should call Save before writing to +// the response or returning from the handler. +func (s *Session) Save(r *http.Request, w http.ResponseWriter) error { + return s.store.Save(r, w, s) +} + +// Name returns the name used to register the session. +func (s *Session) Name() string { + return s.name +} + +// Store returns the session store used to register the session. +func (s *Session) Store() Store { + return s.store +} + +// Registry ------------------------------------------------------------------- + +// sessionInfo stores a session tracked by the registry. +type sessionInfo struct { + s *Session + e error +} + +// contextKey is the type used to store the registry in the context. +type contextKey int + +// registryKey is the key used to store the registry in the context. +const registryKey contextKey = 0 + +// GetRegistry returns a registry instance for the current request. 
+func GetRegistry(r *http.Request) *Registry { + var ctx = r.Context() + registry := ctx.Value(registryKey) + if registry != nil { + return registry.(*Registry) + } + newRegistry := &Registry{ + request: r, + sessions: make(map[string]sessionInfo), + } + *r = *r.WithContext(context.WithValue(ctx, registryKey, newRegistry)) + return newRegistry +} + +// Registry stores sessions used during a request. +type Registry struct { + request *http.Request + sessions map[string]sessionInfo +} + +// Get registers and returns a session for the given name and session store. +// +// It returns a new session if there are no sessions registered for the name. +func (s *Registry) Get(store Store, name string) (session *Session, err error) { + if !isCookieNameValid(name) { + return nil, fmt.Errorf("sessions: invalid character in cookie name: %s", name) + } + if info, ok := s.sessions[name]; ok { + session, err = info.s, info.e + } else { + session, err = store.New(s.request, name) + session.name = name + s.sessions[name] = sessionInfo{s: session, e: err} + } + session.store = store + return +} + +// Save saves all sessions registered for the current request. +func (s *Registry) Save(w http.ResponseWriter) error { + var errMulti MultiError + for name, info := range s.sessions { + session := info.s + if session.store == nil { + errMulti = append(errMulti, fmt.Errorf( + "sessions: missing store for session %q", name)) + } else if err := session.store.Save(s.request, w, session); err != nil { + errMulti = append(errMulti, fmt.Errorf( + "sessions: error saving session %q -- %v", name, err)) + } + } + if errMulti != nil { + return errMulti + } + return nil +} + +// Helpers -------------------------------------------------------------------- + +func init() { + gob.Register([]interface{}{}) +} + +// Save saves all sessions used during the current request. +func Save(r *http.Request, w http.ResponseWriter) error { + return GetRegistry(r).Save(w) +} + +// NewCookie returns an http.Cookie with the options set. It also sets +// the Expires field calculated based on the MaxAge value, for Internet +// Explorer compatibility. +func NewCookie(name, value string, options *Options) *http.Cookie { + cookie := newCookieFromOptions(name, value, options) + if options.MaxAge > 0 { + d := time.Duration(options.MaxAge) * time.Second + cookie.Expires = time.Now().Add(d) + } else if options.MaxAge < 0 { + // Set it to the past to expire now. + cookie.Expires = time.Unix(1, 0) + } + return cookie +} + +// Error ---------------------------------------------------------------------- + +// MultiError stores multiple errors. +// +// Borrowed from the App Engine SDK. +type MultiError []error + +func (m MultiError) Error() string { + s, n := "", 0 + for _, e := range m { + if e != nil { + if n == 0 { + s = e.Error() + } + n++ + } + } + switch n { + case 0: + return "(0 errors)" + case 1: + return s + case 2: + return s + " (and 1 other error)" + } + return fmt.Sprintf("%s (and %d other errors)", s, n-1) +} diff --git a/vendor/github.com/gorilla/sessions/store.go b/vendor/github.com/gorilla/sessions/store.go new file mode 100644 index 0000000..24db822 --- /dev/null +++ b/vendor/github.com/gorilla/sessions/store.go @@ -0,0 +1,298 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package sessions + +import ( + "encoding/base32" + "net/http" + "os" + "path/filepath" + "sync" + + "github.com/gorilla/securecookie" +) + +const ( + // File name prefix for session files. + sessionFilePrefix = "session_" +) + +// Store is an interface for custom session stores. +// +// See CookieStore and FilesystemStore for examples. +type Store interface { + // Get should return a cached session. + Get(r *http.Request, name string) (*Session, error) + + // New should create and return a new session. + // + // Note that New should never return a nil session, even in the case of + // an error if using the Registry infrastructure to cache the session. + New(r *http.Request, name string) (*Session, error) + + // Save should persist session to the underlying store implementation. + Save(r *http.Request, w http.ResponseWriter, s *Session) error +} + +// CookieStore ---------------------------------------------------------------- + +// NewCookieStore returns a new CookieStore. +// +// Keys are defined in pairs to allow key rotation, but the common case is +// to set a single authentication key and optionally an encryption key. +// +// The first key in a pair is used for authentication and the second for +// encryption. The encryption key can be set to nil or omitted in the last +// pair, but the authentication key is required in all pairs. +// +// It is recommended to use an authentication key with 32 or 64 bytes. +// The encryption key, if set, must be either 16, 24, or 32 bytes to select +// AES-128, AES-192, or AES-256 modes. +func NewCookieStore(keyPairs ...[]byte) *CookieStore { + cs := &CookieStore{ + Codecs: securecookie.CodecsFromPairs(keyPairs...), + Options: &Options{ + Path: "/", + MaxAge: 86400 * 30, + SameSite: http.SameSiteNoneMode, + Secure: true, + }, + } + + cs.MaxAge(cs.Options.MaxAge) + return cs +} + +// CookieStore stores sessions using secure cookies. +type CookieStore struct { + Codecs []securecookie.Codec + Options *Options // default configuration +} + +// Get returns a session for the given name after adding it to the registry. +// +// It returns a new session if the sessions doesn't exist. Access IsNew on +// the session to check if it is an existing session or a new one. +// +// It returns a new session and an error if the session exists but could +// not be decoded. +func (s *CookieStore) Get(r *http.Request, name string) (*Session, error) { + return GetRegistry(r).Get(s, name) +} + +// New returns a session for the given name without adding it to the registry. +// +// The difference between New() and Get() is that calling New() twice will +// decode the session data twice, while Get() registers and reuses the same +// decoded session after the first call. +func (s *CookieStore) New(r *http.Request, name string) (*Session, error) { + session := NewSession(s, name) + opts := *s.Options + session.Options = &opts + session.IsNew = true + var err error + if c, errCookie := r.Cookie(name); errCookie == nil { + err = securecookie.DecodeMulti(name, c.Value, &session.Values, + s.Codecs...) + if err == nil { + session.IsNew = false + } + } + return session, err +} + +// Save adds a single session to the response. +func (s *CookieStore) Save(r *http.Request, w http.ResponseWriter, + session *Session) error { + encoded, err := securecookie.EncodeMulti(session.Name(), session.Values, + s.Codecs...) 
+ if err != nil { + return err + } + http.SetCookie(w, NewCookie(session.Name(), encoded, session.Options)) + return nil +} + +// MaxAge sets the maximum age for the store and the underlying cookie +// implementation. Individual sessions can be deleted by setting Options.MaxAge +// = -1 for that session. +func (s *CookieStore) MaxAge(age int) { + s.Options.MaxAge = age + + // Set the maxAge for each securecookie instance. + for _, codec := range s.Codecs { + if sc, ok := codec.(*securecookie.SecureCookie); ok { + sc.MaxAge(age) + } + } +} + +// FilesystemStore ------------------------------------------------------------ + +var fileMutex sync.RWMutex + +// NewFilesystemStore returns a new FilesystemStore. +// +// The path argument is the directory where sessions will be saved. If empty +// it will use os.TempDir(). +// +// See NewCookieStore() for a description of the other parameters. +func NewFilesystemStore(path string, keyPairs ...[]byte) *FilesystemStore { + if path == "" { + path = os.TempDir() + } + fs := &FilesystemStore{ + Codecs: securecookie.CodecsFromPairs(keyPairs...), + Options: &Options{ + Path: "/", + MaxAge: 86400 * 30, + }, + path: path, + } + + fs.MaxAge(fs.Options.MaxAge) + return fs +} + +// FilesystemStore stores sessions in the filesystem. +// +// It also serves as a reference for custom stores. +// +// This store is still experimental and not well tested. Feedback is welcome. +type FilesystemStore struct { + Codecs []securecookie.Codec + Options *Options // default configuration + path string +} + +// MaxLength restricts the maximum length of new sessions to l. +// If l is 0 there is no limit to the size of a session, use with caution. +// The default for a new FilesystemStore is 4096. +func (s *FilesystemStore) MaxLength(l int) { + for _, c := range s.Codecs { + if codec, ok := c.(*securecookie.SecureCookie); ok { + codec.MaxLength(l) + } + } +} + +// Get returns a session for the given name after adding it to the registry. +// +// See CookieStore.Get(). +func (s *FilesystemStore) Get(r *http.Request, name string) (*Session, error) { + return GetRegistry(r).Get(s, name) +} + +// New returns a session for the given name without adding it to the registry. +// +// See CookieStore.New(). +func (s *FilesystemStore) New(r *http.Request, name string) (*Session, error) { + session := NewSession(s, name) + opts := *s.Options + session.Options = &opts + session.IsNew = true + var err error + if c, errCookie := r.Cookie(name); errCookie == nil { + err = securecookie.DecodeMulti(name, c.Value, &session.ID, s.Codecs...) + if err == nil { + err = s.load(session) + if err == nil { + session.IsNew = false + } + } + } + return session, err +} + +var base32RawStdEncoding = base32.StdEncoding.WithPadding(base32.NoPadding) + +// Save adds a single session to the response. +// +// If the Options.MaxAge of the session is <= 0 then the session file will be +// deleted from the store path. With this process it enforces the properly +// session cookie handling so no need to trust in the cookie management in the +// web browser. +func (s *FilesystemStore) Save(r *http.Request, w http.ResponseWriter, + session *Session) error { + // Delete if max-age is <= 0 + if session.Options.MaxAge <= 0 { + if err := s.erase(session); err != nil && !os.IsNotExist(err) { + return err + } + http.SetCookie(w, NewCookie(session.Name(), "", session.Options)) + return nil + } + + if session.ID == "" { + // Because the ID is used in the filename, encode it to + // use alphanumeric characters only. 
+ session.ID = base32RawStdEncoding.EncodeToString( + securecookie.GenerateRandomKey(32)) + } + if err := s.save(session); err != nil { + return err + } + encoded, err := securecookie.EncodeMulti(session.Name(), session.ID, + s.Codecs...) + if err != nil { + return err + } + http.SetCookie(w, NewCookie(session.Name(), encoded, session.Options)) + return nil +} + +// MaxAge sets the maximum age for the store and the underlying cookie +// implementation. Individual sessions can be deleted by setting Options.MaxAge +// = -1 for that session. +func (s *FilesystemStore) MaxAge(age int) { + s.Options.MaxAge = age + + // Set the maxAge for each securecookie instance. + for _, codec := range s.Codecs { + if sc, ok := codec.(*securecookie.SecureCookie); ok { + sc.MaxAge(age) + } + } +} + +// save writes encoded session.Values to a file. +func (s *FilesystemStore) save(session *Session) error { + encoded, err := securecookie.EncodeMulti(session.Name(), session.Values, + s.Codecs...) + if err != nil { + return err + } + filename := filepath.Join(s.path, sessionFilePrefix+filepath.Base(session.ID)) + fileMutex.Lock() + defer fileMutex.Unlock() + return os.WriteFile(filename, []byte(encoded), 0600) +} + +// load reads a file and decodes its content into session.Values. +func (s *FilesystemStore) load(session *Session) error { + filename := filepath.Join(s.path, sessionFilePrefix+filepath.Base(session.ID)) + fileMutex.RLock() + defer fileMutex.RUnlock() + fdata, err := os.ReadFile(filepath.Clean(filename)) + if err != nil { + return err + } + if err = securecookie.DecodeMulti(session.Name(), string(fdata), + &session.Values, s.Codecs...); err != nil { + return err + } + return nil +} + +// delete session file +func (s *FilesystemStore) erase(session *Session) error { + filename := filepath.Join(s.path, sessionFilePrefix+filepath.Base(session.ID)) + + fileMutex.RLock() + defer fileMutex.RUnlock() + + err := os.Remove(filename) + return err +} diff --git a/vendor/github.com/hashicorp/golang-lru/v2/.gitignore b/vendor/github.com/hashicorp/golang-lru/v2/.gitignore new file mode 100644 index 0000000..8365624 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/v2/.gitignore @@ -0,0 +1,23 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test diff --git a/vendor/github.com/hashicorp/golang-lru/v2/.golangci.yml b/vendor/github.com/hashicorp/golang-lru/v2/.golangci.yml new file mode 100644 index 0000000..7e7b8a9 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/v2/.golangci.yml @@ -0,0 +1,46 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +linters: + fast: false + disable-all: true + enable: + - revive + - megacheck + - govet + - unconvert + - gas + - gocyclo + - dupl + - misspell + - unparam + - unused + - typecheck + - ineffassign + # - stylecheck + - exportloopref + - gocritic + - nakedret + - gosimple + - prealloc + +# golangci-lint configuration file +linters-settings: + revive: + ignore-generated-header: true + severity: warning + rules: + - name: package-comments + severity: warning + disabled: true + - name: exported + severity: warning + disabled: false + arguments: ["checkPrivateReceivers", "disableStutteringCheck"] + +issues: + exclude-use-default: false + exclude-rules: + - path: _test\.go + linters: + - dupl diff --git a/vendor/github.com/hashicorp/golang-lru/v2/2q.go b/vendor/github.com/hashicorp/golang-lru/v2/2q.go new file mode 100644 index 0000000..8c95252 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/v2/2q.go @@ -0,0 +1,267 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package lru + +import ( + "errors" + "sync" + + "github.com/hashicorp/golang-lru/v2/simplelru" +) + +const ( + // Default2QRecentRatio is the ratio of the 2Q cache dedicated + // to recently added entries that have only been accessed once. + Default2QRecentRatio = 0.25 + + // Default2QGhostEntries is the default ratio of ghost + // entries kept to track entries recently evicted + Default2QGhostEntries = 0.50 +) + +// TwoQueueCache is a thread-safe fixed size 2Q cache. +// 2Q is an enhancement over the standard LRU cache +// in that it tracks both frequently and recently used +// entries separately. This avoids a burst in access to new +// entries from evicting frequently used entries. It adds some +// additional tracking overhead to the standard LRU cache, and is +// computationally about 2x the cost, and adds some metadata over +// head. The ARCCache is similar, but does not require setting any +// parameters. +type TwoQueueCache[K comparable, V any] struct { + size int + recentSize int + recentRatio float64 + ghostRatio float64 + + recent simplelru.LRUCache[K, V] + frequent simplelru.LRUCache[K, V] + recentEvict simplelru.LRUCache[K, struct{}] + lock sync.RWMutex +} + +// New2Q creates a new TwoQueueCache using the default +// values for the parameters. +func New2Q[K comparable, V any](size int) (*TwoQueueCache[K, V], error) { + return New2QParams[K, V](size, Default2QRecentRatio, Default2QGhostEntries) +} + +// New2QParams creates a new TwoQueueCache using the provided +// parameter values. 
+func New2QParams[K comparable, V any](size int, recentRatio, ghostRatio float64) (*TwoQueueCache[K, V], error) { + if size <= 0 { + return nil, errors.New("invalid size") + } + if recentRatio < 0.0 || recentRatio > 1.0 { + return nil, errors.New("invalid recent ratio") + } + if ghostRatio < 0.0 || ghostRatio > 1.0 { + return nil, errors.New("invalid ghost ratio") + } + + // Determine the sub-sizes + recentSize := int(float64(size) * recentRatio) + evictSize := int(float64(size) * ghostRatio) + + // Allocate the LRUs + recent, err := simplelru.NewLRU[K, V](size, nil) + if err != nil { + return nil, err + } + frequent, err := simplelru.NewLRU[K, V](size, nil) + if err != nil { + return nil, err + } + recentEvict, err := simplelru.NewLRU[K, struct{}](evictSize, nil) + if err != nil { + return nil, err + } + + // Initialize the cache + c := &TwoQueueCache[K, V]{ + size: size, + recentSize: recentSize, + recentRatio: recentRatio, + ghostRatio: ghostRatio, + recent: recent, + frequent: frequent, + recentEvict: recentEvict, + } + return c, nil +} + +// Get looks up a key's value from the cache. +func (c *TwoQueueCache[K, V]) Get(key K) (value V, ok bool) { + c.lock.Lock() + defer c.lock.Unlock() + + // Check if this is a frequent value + if val, ok := c.frequent.Get(key); ok { + return val, ok + } + + // If the value is contained in recent, then we + // promote it to frequent + if val, ok := c.recent.Peek(key); ok { + c.recent.Remove(key) + c.frequent.Add(key, val) + return val, ok + } + + // No hit + return +} + +// Add adds a value to the cache. +func (c *TwoQueueCache[K, V]) Add(key K, value V) { + c.lock.Lock() + defer c.lock.Unlock() + + // Check if the value is frequently used already, + // and just update the value + if c.frequent.Contains(key) { + c.frequent.Add(key, value) + return + } + + // Check if the value is recently used, and promote + // the value into the frequent list + if c.recent.Contains(key) { + c.recent.Remove(key) + c.frequent.Add(key, value) + return + } + + // If the value was recently evicted, add it to the + // frequently used list + if c.recentEvict.Contains(key) { + c.ensureSpace(true) + c.recentEvict.Remove(key) + c.frequent.Add(key, value) + return + } + + // Add to the recently seen list + c.ensureSpace(false) + c.recent.Add(key, value) +} + +// ensureSpace is used to ensure we have space in the cache +func (c *TwoQueueCache[K, V]) ensureSpace(recentEvict bool) { + // If we have space, nothing to do + recentLen := c.recent.Len() + freqLen := c.frequent.Len() + if recentLen+freqLen < c.size { + return + } + + // If the recent buffer is larger than + // the target, evict from there + if recentLen > 0 && (recentLen > c.recentSize || (recentLen == c.recentSize && !recentEvict)) { + k, _, _ := c.recent.RemoveOldest() + c.recentEvict.Add(k, struct{}{}) + return + } + + // Remove from the frequent list otherwise + c.frequent.RemoveOldest() +} + +// Len returns the number of items in the cache. +func (c *TwoQueueCache[K, V]) Len() int { + c.lock.RLock() + defer c.lock.RUnlock() + return c.recent.Len() + c.frequent.Len() +} + +// Resize changes the cache size. 
+func (c *TwoQueueCache[K, V]) Resize(size int) (evicted int) { + c.lock.Lock() + defer c.lock.Unlock() + + // Recalculate the sub-sizes + recentSize := int(float64(size) * c.recentRatio) + evictSize := int(float64(size) * c.ghostRatio) + c.size = size + c.recentSize = recentSize + + // ensureSpace + diff := c.recent.Len() + c.frequent.Len() - size + if diff < 0 { + diff = 0 + } + for i := 0; i < diff; i++ { + c.ensureSpace(true) + } + + // Reallocate the LRUs + c.recent.Resize(size) + c.frequent.Resize(size) + c.recentEvict.Resize(evictSize) + + return diff +} + +// Keys returns a slice of the keys in the cache. +// The frequently used keys are first in the returned slice. +func (c *TwoQueueCache[K, V]) Keys() []K { + c.lock.RLock() + defer c.lock.RUnlock() + k1 := c.frequent.Keys() + k2 := c.recent.Keys() + return append(k1, k2...) +} + +// Values returns a slice of the values in the cache. +// The frequently used values are first in the returned slice. +func (c *TwoQueueCache[K, V]) Values() []V { + c.lock.RLock() + defer c.lock.RUnlock() + v1 := c.frequent.Values() + v2 := c.recent.Values() + return append(v1, v2...) +} + +// Remove removes the provided key from the cache. +func (c *TwoQueueCache[K, V]) Remove(key K) { + c.lock.Lock() + defer c.lock.Unlock() + if c.frequent.Remove(key) { + return + } + if c.recent.Remove(key) { + return + } + if c.recentEvict.Remove(key) { + return + } +} + +// Purge is used to completely clear the cache. +func (c *TwoQueueCache[K, V]) Purge() { + c.lock.Lock() + defer c.lock.Unlock() + c.recent.Purge() + c.frequent.Purge() + c.recentEvict.Purge() +} + +// Contains is used to check if the cache contains a key +// without updating recency or frequency. +func (c *TwoQueueCache[K, V]) Contains(key K) bool { + c.lock.RLock() + defer c.lock.RUnlock() + return c.frequent.Contains(key) || c.recent.Contains(key) +} + +// Peek is used to inspect the cache value of a key +// without updating recency or frequency. +func (c *TwoQueueCache[K, V]) Peek(key K) (value V, ok bool) { + c.lock.RLock() + defer c.lock.RUnlock() + if val, ok := c.frequent.Peek(key); ok { + return val, ok + } + return c.recent.Peek(key) +} diff --git a/vendor/github.com/hashicorp/golang-lru/v2/LICENSE b/vendor/github.com/hashicorp/golang-lru/v2/LICENSE new file mode 100644 index 0000000..0e5d580 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/v2/LICENSE @@ -0,0 +1,364 @@ +Copyright (c) 2014 HashiCorp, Inc. + +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. 
"Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. 
under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. 
Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. 
This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. 
+ +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/golang-lru/v2/README.md b/vendor/github.com/hashicorp/golang-lru/v2/README.md new file mode 100644 index 0000000..a942eb5 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/v2/README.md @@ -0,0 +1,79 @@ +golang-lru +========== + +This provides the `lru` package which implements a fixed-size +thread safe LRU cache. It is based on the cache in Groupcache. + +Documentation +============= + +Full docs are available on [Go Packages](https://pkg.go.dev/github.com/hashicorp/golang-lru/v2) + +LRU cache example +================= + +```go +package main + +import ( + "fmt" + "github.com/hashicorp/golang-lru/v2" +) + +func main() { + l, _ := lru.New[int, any](128) + for i := 0; i < 256; i++ { + l.Add(i, nil) + } + if l.Len() != 128 { + panic(fmt.Sprintf("bad len: %v", l.Len())) + } +} +``` + +Expirable LRU cache example +=========================== + +```go +package main + +import ( + "fmt" + "time" + + "github.com/hashicorp/golang-lru/v2/expirable" +) + +func main() { + // make cache with 10ms TTL and 5 max keys + cache := expirable.NewLRU[string, string](5, nil, time.Millisecond*10) + + + // set value under key1. + cache.Add("key1", "val1") + + // get value under key1 + r, ok := cache.Get("key1") + + // check for OK value + if ok { + fmt.Printf("value before expiration is found: %v, value: %q\n", ok, r) + } + + // wait for cache to expire + time.Sleep(time.Millisecond * 12) + + // get value under key1 after key expiration + r, ok = cache.Get("key1") + fmt.Printf("value after expiration is found: %v, value: %q\n", ok, r) + + // set value under key2, would evict old entry because it is already expired. + cache.Add("key2", "val2") + + fmt.Printf("Cache len: %d\n", cache.Len()) + // Output: + // value before expiration is found: true, value: "val1" + // value after expiration is found: false, value: "" + // Cache len: 1 +} +``` diff --git a/vendor/github.com/hashicorp/golang-lru/v2/doc.go b/vendor/github.com/hashicorp/golang-lru/v2/doc.go new file mode 100644 index 0000000..24107ee --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/v2/doc.go @@ -0,0 +1,24 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package lru provides three different LRU caches of varying sophistication. +// +// Cache is a simple LRU cache. It is based on the LRU implementation in +// groupcache: https://github.com/golang/groupcache/tree/master/lru +// +// TwoQueueCache tracks frequently used and recently used entries separately. +// This avoids a burst of accesses from taking out frequently used entries, at +// the cost of about 2x computational overhead and some extra bookkeeping. +// +// ARCCache is an adaptive replacement cache. It tracks recent evictions as well +// as recent usage in both the frequent and recent caches. Its computational +// overhead is comparable to TwoQueueCache, but the memory overhead is linear +// with the size of the cache. +// +// ARC has been patented by IBM, so do not use it if that is problematic for +// your program. 
For this reason, it is in a separate go module contained within +// this repository. +// +// All caches in this package take locks while operating, and are therefore +// thread-safe for consumers. +package lru diff --git a/vendor/github.com/hashicorp/golang-lru/v2/internal/list.go b/vendor/github.com/hashicorp/golang-lru/v2/internal/list.go new file mode 100644 index 0000000..5cd74a0 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/v2/internal/list.go @@ -0,0 +1,142 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE_list file. + +package internal + +import "time" + +// Entry is an LRU Entry +type Entry[K comparable, V any] struct { + // Next and previous pointers in the doubly-linked list of elements. + // To simplify the implementation, internally a list l is implemented + // as a ring, such that &l.root is both the next element of the last + // list element (l.Back()) and the previous element of the first list + // element (l.Front()). + next, prev *Entry[K, V] + + // The list to which this element belongs. + list *LruList[K, V] + + // The LRU Key of this element. + Key K + + // The Value stored with this element. + Value V + + // The time this element would be cleaned up, optional + ExpiresAt time.Time + + // The expiry bucket item was put in, optional + ExpireBucket uint8 +} + +// PrevEntry returns the previous list element or nil. +func (e *Entry[K, V]) PrevEntry() *Entry[K, V] { + if p := e.prev; e.list != nil && p != &e.list.root { + return p + } + return nil +} + +// LruList represents a doubly linked list. +// The zero Value for LruList is an empty list ready to use. +type LruList[K comparable, V any] struct { + root Entry[K, V] // sentinel list element, only &root, root.prev, and root.next are used + len int // current list Length excluding (this) sentinel element +} + +// Init initializes or clears list l. +func (l *LruList[K, V]) Init() *LruList[K, V] { + l.root.next = &l.root + l.root.prev = &l.root + l.len = 0 + return l +} + +// NewList returns an initialized list. +func NewList[K comparable, V any]() *LruList[K, V] { return new(LruList[K, V]).Init() } + +// Length returns the number of elements of list l. +// The complexity is O(1). +func (l *LruList[K, V]) Length() int { return l.len } + +// Back returns the last element of list l or nil if the list is empty. +func (l *LruList[K, V]) Back() *Entry[K, V] { + if l.len == 0 { + return nil + } + return l.root.prev +} + +// lazyInit lazily initializes a zero List Value. +func (l *LruList[K, V]) lazyInit() { + if l.root.next == nil { + l.Init() + } +} + +// insert inserts e after at, increments l.len, and returns e. +func (l *LruList[K, V]) insert(e, at *Entry[K, V]) *Entry[K, V] { + e.prev = at + e.next = at.next + e.prev.next = e + e.next.prev = e + e.list = l + l.len++ + return e +} + +// insertValue is a convenience wrapper for insert(&Entry{Value: v, ExpiresAt: ExpiresAt}, at). +func (l *LruList[K, V]) insertValue(k K, v V, expiresAt time.Time, at *Entry[K, V]) *Entry[K, V] { + return l.insert(&Entry[K, V]{Value: v, Key: k, ExpiresAt: expiresAt}, at) +} + +// Remove removes e from its list, decrements l.len +func (l *LruList[K, V]) Remove(e *Entry[K, V]) V { + e.prev.next = e.next + e.next.prev = e.prev + e.next = nil // avoid memory leaks + e.prev = nil // avoid memory leaks + e.list = nil + l.len-- + + return e.Value +} + +// move moves e to next to at. 
+func (l *LruList[K, V]) move(e, at *Entry[K, V]) { + if e == at { + return + } + e.prev.next = e.next + e.next.prev = e.prev + + e.prev = at + e.next = at.next + e.prev.next = e + e.next.prev = e +} + +// PushFront inserts a new element e with value v at the front of list l and returns e. +func (l *LruList[K, V]) PushFront(k K, v V) *Entry[K, V] { + l.lazyInit() + return l.insertValue(k, v, time.Time{}, &l.root) +} + +// PushFrontExpirable inserts a new expirable element e with Value v at the front of list l and returns e. +func (l *LruList[K, V]) PushFrontExpirable(k K, v V, expiresAt time.Time) *Entry[K, V] { + l.lazyInit() + return l.insertValue(k, v, expiresAt, &l.root) +} + +// MoveToFront moves element e to the front of list l. +// If e is not an element of l, the list is not modified. +// The element must not be nil. +func (l *LruList[K, V]) MoveToFront(e *Entry[K, V]) { + if e.list != l || l.root.next == e { + return + } + // see comment in List.Remove about initialization of l + l.move(e, &l.root) +} diff --git a/vendor/github.com/hashicorp/golang-lru/v2/lru.go b/vendor/github.com/hashicorp/golang-lru/v2/lru.go new file mode 100644 index 0000000..a2655f1 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/v2/lru.go @@ -0,0 +1,250 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package lru + +import ( + "sync" + + "github.com/hashicorp/golang-lru/v2/simplelru" +) + +const ( + // DefaultEvictedBufferSize defines the default buffer size to store evicted key/val + DefaultEvictedBufferSize = 16 +) + +// Cache is a thread-safe fixed size LRU cache. +type Cache[K comparable, V any] struct { + lru *simplelru.LRU[K, V] + evictedKeys []K + evictedVals []V + onEvictedCB func(k K, v V) + lock sync.RWMutex +} + +// New creates an LRU of the given size. +func New[K comparable, V any](size int) (*Cache[K, V], error) { + return NewWithEvict[K, V](size, nil) +} + +// NewWithEvict constructs a fixed size cache with the given eviction +// callback. +func NewWithEvict[K comparable, V any](size int, onEvicted func(key K, value V)) (c *Cache[K, V], err error) { + // create a cache with default settings + c = &Cache[K, V]{ + onEvictedCB: onEvicted, + } + if onEvicted != nil { + c.initEvictBuffers() + onEvicted = c.onEvicted + } + c.lru, err = simplelru.NewLRU(size, onEvicted) + return +} + +func (c *Cache[K, V]) initEvictBuffers() { + c.evictedKeys = make([]K, 0, DefaultEvictedBufferSize) + c.evictedVals = make([]V, 0, DefaultEvictedBufferSize) +} + +// onEvicted save evicted key/val and sent in externally registered callback +// outside of critical section +func (c *Cache[K, V]) onEvicted(k K, v V) { + c.evictedKeys = append(c.evictedKeys, k) + c.evictedVals = append(c.evictedVals, v) +} + +// Purge is used to completely clear the cache. +func (c *Cache[K, V]) Purge() { + var ks []K + var vs []V + c.lock.Lock() + c.lru.Purge() + if c.onEvictedCB != nil && len(c.evictedKeys) > 0 { + ks, vs = c.evictedKeys, c.evictedVals + c.initEvictBuffers() + } + c.lock.Unlock() + // invoke callback outside of critical section + if c.onEvictedCB != nil { + for i := 0; i < len(ks); i++ { + c.onEvictedCB(ks[i], vs[i]) + } + } +} + +// Add adds a value to the cache. Returns true if an eviction occurred. 
+func (c *Cache[K, V]) Add(key K, value V) (evicted bool) { + var k K + var v V + c.lock.Lock() + evicted = c.lru.Add(key, value) + if c.onEvictedCB != nil && evicted { + k, v = c.evictedKeys[0], c.evictedVals[0] + c.evictedKeys, c.evictedVals = c.evictedKeys[:0], c.evictedVals[:0] + } + c.lock.Unlock() + if c.onEvictedCB != nil && evicted { + c.onEvictedCB(k, v) + } + return +} + +// Get looks up a key's value from the cache. +func (c *Cache[K, V]) Get(key K) (value V, ok bool) { + c.lock.Lock() + value, ok = c.lru.Get(key) + c.lock.Unlock() + return value, ok +} + +// Contains checks if a key is in the cache, without updating the +// recent-ness or deleting it for being stale. +func (c *Cache[K, V]) Contains(key K) bool { + c.lock.RLock() + containKey := c.lru.Contains(key) + c.lock.RUnlock() + return containKey +} + +// Peek returns the key value (or undefined if not found) without updating +// the "recently used"-ness of the key. +func (c *Cache[K, V]) Peek(key K) (value V, ok bool) { + c.lock.RLock() + value, ok = c.lru.Peek(key) + c.lock.RUnlock() + return value, ok +} + +// ContainsOrAdd checks if a key is in the cache without updating the +// recent-ness or deleting it for being stale, and if not, adds the value. +// Returns whether found and whether an eviction occurred. +func (c *Cache[K, V]) ContainsOrAdd(key K, value V) (ok, evicted bool) { + var k K + var v V + c.lock.Lock() + if c.lru.Contains(key) { + c.lock.Unlock() + return true, false + } + evicted = c.lru.Add(key, value) + if c.onEvictedCB != nil && evicted { + k, v = c.evictedKeys[0], c.evictedVals[0] + c.evictedKeys, c.evictedVals = c.evictedKeys[:0], c.evictedVals[:0] + } + c.lock.Unlock() + if c.onEvictedCB != nil && evicted { + c.onEvictedCB(k, v) + } + return false, evicted +} + +// PeekOrAdd checks if a key is in the cache without updating the +// recent-ness or deleting it for being stale, and if not, adds the value. +// Returns whether found and whether an eviction occurred. +func (c *Cache[K, V]) PeekOrAdd(key K, value V) (previous V, ok, evicted bool) { + var k K + var v V + c.lock.Lock() + previous, ok = c.lru.Peek(key) + if ok { + c.lock.Unlock() + return previous, true, false + } + evicted = c.lru.Add(key, value) + if c.onEvictedCB != nil && evicted { + k, v = c.evictedKeys[0], c.evictedVals[0] + c.evictedKeys, c.evictedVals = c.evictedKeys[:0], c.evictedVals[:0] + } + c.lock.Unlock() + if c.onEvictedCB != nil && evicted { + c.onEvictedCB(k, v) + } + return +} + +// Remove removes the provided key from the cache. +func (c *Cache[K, V]) Remove(key K) (present bool) { + var k K + var v V + c.lock.Lock() + present = c.lru.Remove(key) + if c.onEvictedCB != nil && present { + k, v = c.evictedKeys[0], c.evictedVals[0] + c.evictedKeys, c.evictedVals = c.evictedKeys[:0], c.evictedVals[:0] + } + c.lock.Unlock() + if c.onEvictedCB != nil && present { + c.onEvictedCB(k, v) + } + return +} + +// Resize changes the cache size. +func (c *Cache[K, V]) Resize(size int) (evicted int) { + var ks []K + var vs []V + c.lock.Lock() + evicted = c.lru.Resize(size) + if c.onEvictedCB != nil && evicted > 0 { + ks, vs = c.evictedKeys, c.evictedVals + c.initEvictBuffers() + } + c.lock.Unlock() + if c.onEvictedCB != nil && evicted > 0 { + for i := 0; i < len(ks); i++ { + c.onEvictedCB(ks[i], vs[i]) + } + } + return evicted +} + +// RemoveOldest removes the oldest item from the cache. 
+func (c *Cache[K, V]) RemoveOldest() (key K, value V, ok bool) { + var k K + var v V + c.lock.Lock() + key, value, ok = c.lru.RemoveOldest() + if c.onEvictedCB != nil && ok { + k, v = c.evictedKeys[0], c.evictedVals[0] + c.evictedKeys, c.evictedVals = c.evictedKeys[:0], c.evictedVals[:0] + } + c.lock.Unlock() + if c.onEvictedCB != nil && ok { + c.onEvictedCB(k, v) + } + return +} + +// GetOldest returns the oldest entry +func (c *Cache[K, V]) GetOldest() (key K, value V, ok bool) { + c.lock.RLock() + key, value, ok = c.lru.GetOldest() + c.lock.RUnlock() + return +} + +// Keys returns a slice of the keys in the cache, from oldest to newest. +func (c *Cache[K, V]) Keys() []K { + c.lock.RLock() + keys := c.lru.Keys() + c.lock.RUnlock() + return keys +} + +// Values returns a slice of the values in the cache, from oldest to newest. +func (c *Cache[K, V]) Values() []V { + c.lock.RLock() + values := c.lru.Values() + c.lock.RUnlock() + return values +} + +// Len returns the number of items in the cache. +func (c *Cache[K, V]) Len() int { + c.lock.RLock() + length := c.lru.Len() + c.lock.RUnlock() + return length +} diff --git a/vendor/github.com/hashicorp/golang-lru/v2/simplelru/LICENSE_list b/vendor/github.com/hashicorp/golang-lru/v2/simplelru/LICENSE_list new file mode 100644 index 0000000..c4764e6 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/v2/simplelru/LICENSE_list @@ -0,0 +1,29 @@ +This license applies to simplelru/list.go + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/hashicorp/golang-lru/v2/simplelru/lru.go b/vendor/github.com/hashicorp/golang-lru/v2/simplelru/lru.go new file mode 100644 index 0000000..f697923 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/v2/simplelru/lru.go @@ -0,0 +1,177 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package simplelru + +import ( + "errors" + + "github.com/hashicorp/golang-lru/v2/internal" +) + +// EvictCallback is used to get a callback when a cache entry is evicted +type EvictCallback[K comparable, V any] func(key K, value V) + +// LRU implements a non-thread safe fixed size LRU cache +type LRU[K comparable, V any] struct { + size int + evictList *internal.LruList[K, V] + items map[K]*internal.Entry[K, V] + onEvict EvictCallback[K, V] +} + +// NewLRU constructs an LRU of the given size +func NewLRU[K comparable, V any](size int, onEvict EvictCallback[K, V]) (*LRU[K, V], error) { + if size <= 0 { + return nil, errors.New("must provide a positive size") + } + + c := &LRU[K, V]{ + size: size, + evictList: internal.NewList[K, V](), + items: make(map[K]*internal.Entry[K, V]), + onEvict: onEvict, + } + return c, nil +} + +// Purge is used to completely clear the cache. +func (c *LRU[K, V]) Purge() { + for k, v := range c.items { + if c.onEvict != nil { + c.onEvict(k, v.Value) + } + delete(c.items, k) + } + c.evictList.Init() +} + +// Add adds a value to the cache. Returns true if an eviction occurred. +func (c *LRU[K, V]) Add(key K, value V) (evicted bool) { + // Check for existing item + if ent, ok := c.items[key]; ok { + c.evictList.MoveToFront(ent) + ent.Value = value + return false + } + + // Add new item + ent := c.evictList.PushFront(key, value) + c.items[key] = ent + + evict := c.evictList.Length() > c.size + // Verify size not exceeded + if evict { + c.removeOldest() + } + return evict +} + +// Get looks up a key's value from the cache. +func (c *LRU[K, V]) Get(key K) (value V, ok bool) { + if ent, ok := c.items[key]; ok { + c.evictList.MoveToFront(ent) + return ent.Value, true + } + return +} + +// Contains checks if a key is in the cache, without updating the recent-ness +// or deleting it for being stale. +func (c *LRU[K, V]) Contains(key K) (ok bool) { + _, ok = c.items[key] + return ok +} + +// Peek returns the key value (or undefined if not found) without updating +// the "recently used"-ness of the key. +func (c *LRU[K, V]) Peek(key K) (value V, ok bool) { + var ent *internal.Entry[K, V] + if ent, ok = c.items[key]; ok { + return ent.Value, true + } + return +} + +// Remove removes the provided key from the cache, returning if the +// key was contained. +func (c *LRU[K, V]) Remove(key K) (present bool) { + if ent, ok := c.items[key]; ok { + c.removeElement(ent) + return true + } + return false +} + +// RemoveOldest removes the oldest item from the cache. +func (c *LRU[K, V]) RemoveOldest() (key K, value V, ok bool) { + if ent := c.evictList.Back(); ent != nil { + c.removeElement(ent) + return ent.Key, ent.Value, true + } + return +} + +// GetOldest returns the oldest entry +func (c *LRU[K, V]) GetOldest() (key K, value V, ok bool) { + if ent := c.evictList.Back(); ent != nil { + return ent.Key, ent.Value, true + } + return +} + +// Keys returns a slice of the keys in the cache, from oldest to newest. +func (c *LRU[K, V]) Keys() []K { + keys := make([]K, c.evictList.Length()) + i := 0 + for ent := c.evictList.Back(); ent != nil; ent = ent.PrevEntry() { + keys[i] = ent.Key + i++ + } + return keys +} + +// Values returns a slice of the values in the cache, from oldest to newest. +func (c *LRU[K, V]) Values() []V { + values := make([]V, len(c.items)) + i := 0 + for ent := c.evictList.Back(); ent != nil; ent = ent.PrevEntry() { + values[i] = ent.Value + i++ + } + return values +} + +// Len returns the number of items in the cache. 
+func (c *LRU[K, V]) Len() int { + return c.evictList.Length() +} + +// Resize changes the cache size. +func (c *LRU[K, V]) Resize(size int) (evicted int) { + diff := c.Len() - size + if diff < 0 { + diff = 0 + } + for i := 0; i < diff; i++ { + c.removeOldest() + } + c.size = size + return diff +} + +// removeOldest removes the oldest item from the cache. +func (c *LRU[K, V]) removeOldest() { + if ent := c.evictList.Back(); ent != nil { + c.removeElement(ent) + } +} + +// removeElement is used to remove a given list element from the cache +func (c *LRU[K, V]) removeElement(e *internal.Entry[K, V]) { + c.evictList.Remove(e) + delete(c.items, e.Key) + if c.onEvict != nil { + c.onEvict(e.Key, e.Value) + } +} diff --git a/vendor/github.com/hashicorp/golang-lru/v2/simplelru/lru_interface.go b/vendor/github.com/hashicorp/golang-lru/v2/simplelru/lru_interface.go new file mode 100644 index 0000000..043b8bc --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/v2/simplelru/lru_interface.go @@ -0,0 +1,46 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package simplelru provides simple LRU implementation based on build-in container/list. +package simplelru + +// LRUCache is the interface for simple LRU cache. +type LRUCache[K comparable, V any] interface { + // Adds a value to the cache, returns true if an eviction occurred and + // updates the "recently used"-ness of the key. + Add(key K, value V) bool + + // Returns key's value from the cache and + // updates the "recently used"-ness of the key. #value, isFound + Get(key K) (value V, ok bool) + + // Checks if a key exists in cache without updating the recent-ness. + Contains(key K) (ok bool) + + // Returns key's value without updating the "recently used"-ness of the key. + Peek(key K) (value V, ok bool) + + // Removes a key from the cache. + Remove(key K) bool + + // Removes the oldest entry from cache. + RemoveOldest() (K, V, bool) + + // Returns the oldest entry from the cache. #key, value, isFound + GetOldest() (K, V, bool) + + // Returns a slice of the keys in the cache, from oldest to newest. + Keys() []K + + // Values returns a slice of the values in the cache, from oldest to newest. + Values() []V + + // Returns the number of items in the cache. + Len() int + + // Clears all cache entries. + Purge() + + // Resizes cache, returning number evicted + Resize(int) int +} diff --git a/vendor/github.com/mattn/go-isatty/LICENSE b/vendor/github.com/mattn/go-isatty/LICENSE new file mode 100644 index 0000000..65dc692 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/LICENSE @@ -0,0 +1,9 @@ +Copyright (c) Yasuhiro MATSUMOTO + +MIT License (Expat) + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/mattn/go-isatty/README.md b/vendor/github.com/mattn/go-isatty/README.md new file mode 100644 index 0000000..3841835 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/README.md @@ -0,0 +1,50 @@ +# go-isatty + +[![Godoc Reference](https://godoc.org/github.com/mattn/go-isatty?status.svg)](http://godoc.org/github.com/mattn/go-isatty) +[![Codecov](https://codecov.io/gh/mattn/go-isatty/branch/master/graph/badge.svg)](https://codecov.io/gh/mattn/go-isatty) +[![Coverage Status](https://coveralls.io/repos/github/mattn/go-isatty/badge.svg?branch=master)](https://coveralls.io/github/mattn/go-isatty?branch=master) +[![Go Report Card](https://goreportcard.com/badge/mattn/go-isatty)](https://goreportcard.com/report/mattn/go-isatty) + +isatty for golang + +## Usage + +```go +package main + +import ( + "fmt" + "github.com/mattn/go-isatty" + "os" +) + +func main() { + if isatty.IsTerminal(os.Stdout.Fd()) { + fmt.Println("Is Terminal") + } else if isatty.IsCygwinTerminal(os.Stdout.Fd()) { + fmt.Println("Is Cygwin/MSYS2 Terminal") + } else { + fmt.Println("Is Not Terminal") + } +} +``` + +## Installation + +``` +$ go get github.com/mattn/go-isatty +``` + +## License + +MIT + +## Author + +Yasuhiro Matsumoto (a.k.a mattn) + +## Thanks + +* k-takata: base idea for IsCygwinTerminal + + https://github.com/k-takata/go-iscygpty diff --git a/vendor/github.com/mattn/go-isatty/doc.go b/vendor/github.com/mattn/go-isatty/doc.go new file mode 100644 index 0000000..17d4f90 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/doc.go @@ -0,0 +1,2 @@ +// Package isatty implements interface to isatty +package isatty diff --git a/vendor/github.com/mattn/go-isatty/go.test.sh b/vendor/github.com/mattn/go-isatty/go.test.sh new file mode 100644 index 0000000..012162b --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/go.test.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +set -e +echo "" > coverage.txt + +for d in $(go list ./... | grep -v vendor); do + go test -race -coverprofile=profile.out -covermode=atomic "$d" + if [ -f profile.out ]; then + cat profile.out >> coverage.txt + rm profile.out + fi +done diff --git a/vendor/github.com/mattn/go-isatty/isatty_bsd.go b/vendor/github.com/mattn/go-isatty/isatty_bsd.go new file mode 100644 index 0000000..d0ea68f --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_bsd.go @@ -0,0 +1,20 @@ +//go:build (darwin || freebsd || openbsd || netbsd || dragonfly || hurd) && !appengine && !tinygo +// +build darwin freebsd openbsd netbsd dragonfly hurd +// +build !appengine +// +build !tinygo + +package isatty + +import "golang.org/x/sys/unix" + +// IsTerminal return true if the file descriptor is terminal. +func IsTerminal(fd uintptr) bool { + _, err := unix.IoctlGetTermios(int(fd), unix.TIOCGETA) + return err == nil +} + +// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. 
+func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_others.go b/vendor/github.com/mattn/go-isatty/isatty_others.go new file mode 100644 index 0000000..7402e06 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_others.go @@ -0,0 +1,17 @@ +//go:build (appengine || js || nacl || tinygo || wasm) && !windows +// +build appengine js nacl tinygo wasm +// +build !windows + +package isatty + +// IsTerminal returns true if the file descriptor is terminal which +// is always false on js and appengine classic which is a sandboxed PaaS. +func IsTerminal(fd uintptr) bool { + return false +} + +// IsCygwinTerminal() return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. +func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_plan9.go b/vendor/github.com/mattn/go-isatty/isatty_plan9.go new file mode 100644 index 0000000..bae7f9b --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_plan9.go @@ -0,0 +1,23 @@ +//go:build plan9 +// +build plan9 + +package isatty + +import ( + "syscall" +) + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(fd uintptr) bool { + path, err := syscall.Fd2path(int(fd)) + if err != nil { + return false + } + return path == "/dev/cons" || path == "/mnt/term/dev/cons" +} + +// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. +func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_solaris.go b/vendor/github.com/mattn/go-isatty/isatty_solaris.go new file mode 100644 index 0000000..0c3acf2 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_solaris.go @@ -0,0 +1,21 @@ +//go:build solaris && !appengine +// +build solaris,!appengine + +package isatty + +import ( + "golang.org/x/sys/unix" +) + +// IsTerminal returns true if the given file descriptor is a terminal. +// see: https://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libc/port/gen/isatty.c +func IsTerminal(fd uintptr) bool { + _, err := unix.IoctlGetTermio(int(fd), unix.TCGETA) + return err == nil +} + +// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. +func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_tcgets.go b/vendor/github.com/mattn/go-isatty/isatty_tcgets.go new file mode 100644 index 0000000..0337d8c --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_tcgets.go @@ -0,0 +1,20 @@ +//go:build (linux || aix || zos) && !appengine && !tinygo +// +build linux aix zos +// +build !appengine +// +build !tinygo + +package isatty + +import "golang.org/x/sys/unix" + +// IsTerminal return true if the file descriptor is terminal. +func IsTerminal(fd uintptr) bool { + _, err := unix.IoctlGetTermios(int(fd), unix.TCGETS) + return err == nil +} + +// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. 
+func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_windows.go b/vendor/github.com/mattn/go-isatty/isatty_windows.go new file mode 100644 index 0000000..8e3c991 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_windows.go @@ -0,0 +1,125 @@ +//go:build windows && !appengine +// +build windows,!appengine + +package isatty + +import ( + "errors" + "strings" + "syscall" + "unicode/utf16" + "unsafe" +) + +const ( + objectNameInfo uintptr = 1 + fileNameInfo = 2 + fileTypePipe = 3 +) + +var ( + kernel32 = syscall.NewLazyDLL("kernel32.dll") + ntdll = syscall.NewLazyDLL("ntdll.dll") + procGetConsoleMode = kernel32.NewProc("GetConsoleMode") + procGetFileInformationByHandleEx = kernel32.NewProc("GetFileInformationByHandleEx") + procGetFileType = kernel32.NewProc("GetFileType") + procNtQueryObject = ntdll.NewProc("NtQueryObject") +) + +func init() { + // Check if GetFileInformationByHandleEx is available. + if procGetFileInformationByHandleEx.Find() != nil { + procGetFileInformationByHandleEx = nil + } +} + +// IsTerminal return true if the file descriptor is terminal. +func IsTerminal(fd uintptr) bool { + var st uint32 + r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0) + return r != 0 && e == 0 +} + +// Check pipe name is used for cygwin/msys2 pty. +// Cygwin/MSYS2 PTY has a name like: +// \{cygwin,msys}-XXXXXXXXXXXXXXXX-ptyN-{from,to}-master +func isCygwinPipeName(name string) bool { + token := strings.Split(name, "-") + if len(token) < 5 { + return false + } + + if token[0] != `\msys` && + token[0] != `\cygwin` && + token[0] != `\Device\NamedPipe\msys` && + token[0] != `\Device\NamedPipe\cygwin` { + return false + } + + if token[1] == "" { + return false + } + + if !strings.HasPrefix(token[2], "pty") { + return false + } + + if token[3] != `from` && token[3] != `to` { + return false + } + + if token[4] != "master" { + return false + } + + return true +} + +// getFileNameByHandle use the undocomented ntdll NtQueryObject to get file full name from file handler +// since GetFileInformationByHandleEx is not available under windows Vista and still some old fashion +// guys are using Windows XP, this is a workaround for those guys, it will also work on system from +// Windows vista to 10 +// see https://stackoverflow.com/a/18792477 for details +func getFileNameByHandle(fd uintptr) (string, error) { + if procNtQueryObject == nil { + return "", errors.New("ntdll.dll: NtQueryObject not supported") + } + + var buf [4 + syscall.MAX_PATH]uint16 + var result int + r, _, e := syscall.Syscall6(procNtQueryObject.Addr(), 5, + fd, objectNameInfo, uintptr(unsafe.Pointer(&buf)), uintptr(2*len(buf)), uintptr(unsafe.Pointer(&result)), 0) + if r != 0 { + return "", e + } + return string(utf16.Decode(buf[4 : 4+buf[0]/2])), nil +} + +// IsCygwinTerminal() return true if the file descriptor is a cygwin or msys2 +// terminal. +func IsCygwinTerminal(fd uintptr) bool { + if procGetFileInformationByHandleEx == nil { + name, err := getFileNameByHandle(fd) + if err != nil { + return false + } + return isCygwinPipeName(name) + } + + // Cygwin/msys's pty is a pipe. 
+ ft, _, e := syscall.Syscall(procGetFileType.Addr(), 1, fd, 0, 0) + if ft != fileTypePipe || e != 0 { + return false + } + + var buf [2 + syscall.MAX_PATH]uint16 + r, _, e := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), + 4, fd, fileNameInfo, uintptr(unsafe.Pointer(&buf)), + uintptr(len(buf)*2), 0, 0) + if r == 0 || e != 0 { + return false + } + + l := *(*uint32)(unsafe.Pointer(&buf)) + return isCygwinPipeName(string(utf16.Decode(buf[2 : 2+l/2]))) +} diff --git a/vendor/github.com/ncruces/go-strftime/.gitignore b/vendor/github.com/ncruces/go-strftime/.gitignore new file mode 100644 index 0000000..66fd13c --- /dev/null +++ b/vendor/github.com/ncruces/go-strftime/.gitignore @@ -0,0 +1,15 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ diff --git a/vendor/github.com/ncruces/go-strftime/LICENSE b/vendor/github.com/ncruces/go-strftime/LICENSE new file mode 100644 index 0000000..7f0f553 --- /dev/null +++ b/vendor/github.com/ncruces/go-strftime/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2022 Nuno Cruces + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
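The go-isatty files above boil down to two exported calls: `IsTerminal`, and `IsCygwinTerminal` for the Cygwin/MSYS2 case, where the pty is really a named pipe (named like `\{cygwin,msys}-XXXXXXXXXXXXXXXX-ptyN-{from,to}-master`) that `GetConsoleMode` does not recognize as a console. Below is a minimal sketch of how the two checks are typically combined when gating ANSI output; the `colorize` helper is illustrative and not part of the vendored sources:

```go
package main

import (
	"fmt"
	"os"

	"github.com/mattn/go-isatty"
)

// colorize wraps s in ANSI green only when stdout is attached to a terminal,
// counting Cygwin/MSYS2 ptys (named pipes, so IsTerminal alone reports false)
// as terminals too. Illustrative helper, not part of the vendored package.
func colorize(s string) string {
	fd := os.Stdout.Fd()
	if isatty.IsTerminal(fd) || isatty.IsCygwinTerminal(fd) {
		return "\x1b[32m" + s + "\x1b[0m"
	}
	return s
}

func main() {
	fmt.Println(colorize("hello"))
}
```

On the non-Windows builds above, `IsCygwinTerminal` always returns false, so the extra check costs nothing outside Windows.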
diff --git a/vendor/github.com/ncruces/go-strftime/README.md b/vendor/github.com/ncruces/go-strftime/README.md new file mode 100644 index 0000000..5b0573c --- /dev/null +++ b/vendor/github.com/ncruces/go-strftime/README.md @@ -0,0 +1,5 @@ +# `strftime`/`strptime` compatible time formatting and parsing for Go + +[![Go Reference](https://pkg.go.dev/badge/image)](https://pkg.go.dev/github.com/ncruces/go-strftime) +[![Go Report](https://goreportcard.com/badge/github.com/ncruces/go-strftime)](https://goreportcard.com/report/github.com/ncruces/go-strftime) +[![Go Coverage](https://github.com/ncruces/go-strftime/wiki/coverage.svg)](https://raw.githack.com/wiki/ncruces/go-strftime/coverage.html) \ No newline at end of file diff --git a/vendor/github.com/ncruces/go-strftime/parser.go b/vendor/github.com/ncruces/go-strftime/parser.go new file mode 100644 index 0000000..b006de3 --- /dev/null +++ b/vendor/github.com/ncruces/go-strftime/parser.go @@ -0,0 +1,107 @@ +package strftime + +import "unicode/utf8" + +type parser struct { + format func(spec, flag byte) error + literal func(byte) error +} + +func (p *parser) parse(fmt string) error { + const ( + initial = iota + percent + flagged + modified + ) + + var flag, modifier byte + var err error + state := initial + start := 0 + for i, b := range []byte(fmt) { + switch state { + default: + if b == '%' { + state = percent + start = i + continue + } + err = p.literal(b) + + case percent: + if b == '-' || b == ':' { + state = flagged + flag = b + continue + } + if b == 'E' || b == 'O' { + state = modified + modifier = b + flag = 0 + continue + } + err = p.format(b, 0) + state = initial + + case flagged: + if b == 'E' || b == 'O' { + state = modified + modifier = b + continue + } + err = p.format(b, flag) + state = initial + + case modified: + if okModifier(modifier, b) { + err = p.format(b, flag) + } else { + err = p.literals(fmt[start : i+1]) + } + state = initial + } + + if err != nil { + if err, ok := err.(formatError); ok { + err.setDirective(fmt, start, i) + return err + } + return err + } + } + + if state != initial { + return p.literals(fmt[start:]) + } + return nil +} + +func (p *parser) literals(literal string) error { + for _, b := range []byte(literal) { + if err := p.literal(b); err != nil { + return err + } + } + return nil +} + +type literalErr string + +func (e literalErr) Error() string { + return "strftime: unsupported literal: " + string(e) +} + +type formatError struct { + message string + directive string +} + +func (e formatError) Error() string { + return "strftime: unsupported directive: " + e.directive + " " + e.message +} + +func (e *formatError) setDirective(str string, i, j int) { + _, n := utf8.DecodeRuneInString(str[j:]) + e.directive = str[i : j+n] +} diff --git a/vendor/github.com/ncruces/go-strftime/pkg.go b/vendor/github.com/ncruces/go-strftime/pkg.go new file mode 100644 index 0000000..9a3bbd3 --- /dev/null +++ b/vendor/github.com/ncruces/go-strftime/pkg.go @@ -0,0 +1,96 @@ +/* +Package strftime provides strftime/strptime compatible time formatting and parsing. + +The following specifiers are available: + + Date (Year, Month, Day): + %Y - Year with century (can be negative, 4 digits at least) + -0001, 0000, 1995, 2009, 14292, etc. 
+ %C - year / 100 (round down, 20 in 2009) + %y - year % 100 (00..99) + + %m - Month of the year, zero-padded (01..12) + %-m no-padded (1..12) + %B - Full month name (January) + %b - Abbreviated month name (Jan) + %h - Equivalent to %b + + %d - Day of the month, zero-padded (01..31) + %-d no-padded (1..31) + %e - Day of the month, blank-padded ( 1..31) + + %j - Day of the year (001..366) + %-j no-padded (1..366) + + Time (Hour, Minute, Second, Subsecond): + %H - Hour of the day, 24-hour clock, zero-padded (00..23) + %-H no-padded (0..23) + %k - Hour of the day, 24-hour clock, blank-padded ( 0..23) + %I - Hour of the day, 12-hour clock, zero-padded (01..12) + %-I no-padded (1..12) + %l - Hour of the day, 12-hour clock, blank-padded ( 1..12) + %P - Meridian indicator, lowercase (am or pm) + %p - Meridian indicator, uppercase (AM or PM) + + %M - Minute of the hour (00..59) + %-M no-padded (0..59) + + %S - Second of the minute (00..60) + %-S no-padded (0..60) + + %L - Millisecond of the second (000..999) + %f - Microsecond of the second (000000..999999) + %N - Nanosecond of the second (000000000..999999999) + + Time zone: + %z - Time zone as hour and minute offset from UTC (e.g. +0900) + %:z - hour and minute offset from UTC with a colon (e.g. +09:00) + %Z - Time zone abbreviation (e.g. MST) + + Weekday: + %A - Full weekday name (Sunday) + %a - Abbreviated weekday name (Sun) + %u - Day of the week (Monday is 1, 1..7) + %w - Day of the week (Sunday is 0, 0..6) + + ISO 8601 week-based year and week number: + Week 1 of YYYY starts with a Monday and includes YYYY-01-04. + The days in the year before the first week are in the last week of + the previous year. + %G - Week-based year + %g - Last 2 digits of the week-based year (00..99) + %V - Week number of the week-based year (01..53) + %-V no-padded (1..53) + + Week number: + Week 1 of YYYY starts with a Sunday or Monday (according to %U or %W). + The days in the year before the first week are in week 0. + %U - Week number of the year. The week starts with Sunday. (00..53) + %-U no-padded (0..53) + %W - Week number of the year. The week starts with Monday. (00..53) + %-W no-padded (0..53) + + Seconds since the Unix Epoch: + %s - Number of seconds since 1970-01-01 00:00:00 UTC. + %Q - Number of milliseconds since 1970-01-01 00:00:00 UTC. + + Literal string: + %n - Newline character (\n) + %t - Tab character (\t) + %% - Literal % character + + Combination: + %c - date and time (%a %b %e %T %Y) + %D - Date (%m/%d/%y) + %F - ISO 8601 date format (%Y-%m-%d) + %v - VMS date (%e-%b-%Y) + %x - Same as %D + %X - Same as %T + %r - 12-hour time (%I:%M:%S %p) + %R - 24-hour time (%H:%M) + %T - 24-hour time (%H:%M:%S) + %+ - date(1) (%a %b %e %H:%M:%S %Z %Y) + +The modifiers ``E'' and ``O'' are ignored. 
+*/ +package strftime diff --git a/vendor/github.com/ncruces/go-strftime/specifiers.go b/vendor/github.com/ncruces/go-strftime/specifiers.go new file mode 100644 index 0000000..065f779 --- /dev/null +++ b/vendor/github.com/ncruces/go-strftime/specifiers.go @@ -0,0 +1,241 @@ +package strftime + +import "strings" + +// https://strftime.org/ +func goLayout(spec, flag byte, parsing bool) string { + switch spec { + default: + return "" + + case 'B': + return "January" + case 'b', 'h': + return "Jan" + case 'm': + if flag == '-' || parsing { + return "1" + } + return "01" + case 'A': + return "Monday" + case 'a': + return "Mon" + case 'e': + return "_2" + case 'd': + if flag == '-' || parsing { + return "2" + } + return "02" + case 'j': + if flag == '-' { + if parsing { + return "__2" + } + return "" + } + return "002" + case 'I': + if flag == '-' || parsing { + return "3" + } + return "03" + case 'H': + if flag == '-' && !parsing { + return "" + } + return "15" + case 'M': + if flag == '-' || parsing { + return "4" + } + return "04" + case 'S': + if flag == '-' || parsing { + return "5" + } + return "05" + case 'y': + return "06" + case 'Y': + return "2006" + case 'p': + return "PM" + case 'P': + return "pm" + case 'Z': + return "MST" + case 'z': + if flag == ':' { + if parsing { + return "Z07:00" + } + return "-07:00" + } + if parsing { + return "Z0700" + } + return "-0700" + + case '+': + if parsing { + return "Mon Jan _2 15:4:5 MST 2006" + } + return "Mon Jan _2 15:04:05 MST 2006" + case 'c': + if parsing { + return "Mon Jan _2 15:4:5 2006" + } + return "Mon Jan _2 15:04:05 2006" + case 'v': + return "_2-Jan-2006" + case 'F': + if parsing { + return "2006-1-2" + } + return "2006-01-02" + case 'D', 'x': + if parsing { + return "1/2/06" + } + return "01/02/06" + case 'r': + if parsing { + return "3:4:5 PM" + } + return "03:04:05 PM" + case 'T', 'X': + if parsing { + return "15:4:5" + } + return "15:04:05" + case 'R': + if parsing { + return "15:4" + } + return "15:04" + + case '%': + return "%" + case 't': + return "\t" + case 'n': + return "\n" + } +} + +// https://nsdateformatter.com/ +func uts35Pattern(spec, flag byte) string { + switch spec { + default: + return "" + + case 'B': + return "MMMM" + case 'b', 'h': + return "MMM" + case 'm': + if flag == '-' { + return "M" + } + return "MM" + case 'A': + return "EEEE" + case 'a': + return "E" + case 'd': + if flag == '-' { + return "d" + } + return "dd" + case 'j': + if flag == '-' { + return "D" + } + return "DDD" + case 'I': + if flag == '-' { + return "h" + } + return "hh" + case 'H': + if flag == '-' { + return "H" + } + return "HH" + case 'M': + if flag == '-' { + return "m" + } + return "mm" + case 'S': + if flag == '-' { + return "s" + } + return "ss" + case 'y': + return "yy" + case 'Y': + return "yyyy" + case 'g': + return "YY" + case 'G': + return "YYYY" + case 'V': + if flag == '-' { + return "w" + } + return "ww" + case 'p': + return "a" + case 'Z': + return "zzz" + case 'z': + if flag == ':' { + return "xxx" + } + return "xx" + case 'L': + return "SSS" + case 'f': + return "SSSSSS" + case 'N': + return "SSSSSSSSS" + + case '+': + return "E MMM d HH:mm:ss zzz yyyy" + case 'c': + return "E MMM d HH:mm:ss yyyy" + case 'v': + return "d-MMM-yyyy" + case 'F': + return "yyyy-MM-dd" + case 'D', 'x': + return "MM/dd/yy" + case 'r': + return "hh:mm:ss a" + case 'T', 'X': + return "HH:mm:ss" + case 'R': + return "HH:mm" + + case '%': + return "%" + case 't': + return "\t" + case 'n': + return "\n" + } +} + +// http://man.he.net/man3/strftime 
+func okModifier(mod, spec byte) bool { + if mod == 'E' { + return strings.Contains("cCxXyY", string(spec)) + } + if mod == 'O' { + return strings.Contains("deHImMSuUVwWy", string(spec)) + } + return false +} diff --git a/vendor/github.com/ncruces/go-strftime/strftime.go b/vendor/github.com/ncruces/go-strftime/strftime.go new file mode 100644 index 0000000..5308ef7 --- /dev/null +++ b/vendor/github.com/ncruces/go-strftime/strftime.go @@ -0,0 +1,324 @@ +package strftime + +import ( + "bytes" + "strconv" + "time" +) + +// Format returns a textual representation of the time value +// formatted according to the strftime format specification. +func Format(fmt string, t time.Time) string { + buf := buffer(fmt) + return string(AppendFormat(buf, fmt, t)) +} + +// AppendFormat is like Format, but appends the textual representation +// to dst and returns the extended buffer. +func AppendFormat(dst []byte, fmt string, t time.Time) []byte { + var parser parser + + parser.literal = func(b byte) error { + dst = append(dst, b) + return nil + } + + parser.format = func(spec, flag byte) error { + switch spec { + case 'A': + dst = append(dst, t.Weekday().String()...) + return nil + case 'a': + dst = append(dst, t.Weekday().String()[:3]...) + return nil + case 'B': + dst = append(dst, t.Month().String()...) + return nil + case 'b', 'h': + dst = append(dst, t.Month().String()[:3]...) + return nil + case 'm': + dst = appendInt2(dst, int(t.Month()), flag) + return nil + case 'd': + dst = appendInt2(dst, int(t.Day()), flag) + return nil + case 'e': + dst = appendInt2(dst, int(t.Day()), ' ') + return nil + case 'I': + dst = append12Hour(dst, t, flag) + return nil + case 'l': + dst = append12Hour(dst, t, ' ') + return nil + case 'H': + dst = appendInt2(dst, t.Hour(), flag) + return nil + case 'k': + dst = appendInt2(dst, t.Hour(), ' ') + return nil + case 'M': + dst = appendInt2(dst, t.Minute(), flag) + return nil + case 'S': + dst = appendInt2(dst, t.Second(), flag) + return nil + case 'L': + dst = append(dst, t.Format(".000")[1:]...) + return nil + case 'f': + dst = append(dst, t.Format(".000000")[1:]...) + return nil + case 'N': + dst = append(dst, t.Format(".000000000")[1:]...) 
+ return nil + case 'y': + dst = t.AppendFormat(dst, "06") + return nil + case 'Y': + dst = t.AppendFormat(dst, "2006") + return nil + case 'C': + dst = t.AppendFormat(dst, "2006") + dst = dst[:len(dst)-2] + return nil + case 'U': + dst = appendWeekNumber(dst, t, flag, true) + return nil + case 'W': + dst = appendWeekNumber(dst, t, flag, false) + return nil + case 'V': + _, w := t.ISOWeek() + dst = appendInt2(dst, w, flag) + return nil + case 'g': + y, _ := t.ISOWeek() + dst = year(y).AppendFormat(dst, "06") + return nil + case 'G': + y, _ := t.ISOWeek() + dst = year(y).AppendFormat(dst, "2006") + return nil + case 's': + dst = strconv.AppendInt(dst, t.Unix(), 10) + return nil + case 'Q': + dst = strconv.AppendInt(dst, t.UnixMilli(), 10) + return nil + case 'w': + w := t.Weekday() + dst = appendInt1(dst, int(w)) + return nil + case 'u': + if w := t.Weekday(); w == 0 { + dst = append(dst, '7') + } else { + dst = appendInt1(dst, int(w)) + } + return nil + case 'j': + if flag == '-' { + dst = strconv.AppendInt(dst, int64(t.YearDay()), 10) + } else { + dst = t.AppendFormat(dst, "002") + } + return nil + } + + if layout := goLayout(spec, flag, false); layout != "" { + dst = t.AppendFormat(dst, layout) + return nil + } + + dst = append(dst, '%') + if flag != 0 { + dst = append(dst, flag) + } + dst = append(dst, spec) + return nil + } + + parser.parse(fmt) + return dst +} + +// Parse converts a textual representation of time to the time value it represents +// according to the strptime format specification. +func Parse(fmt, value string) (time.Time, error) { + pattern, err := layout(fmt, true) + if err != nil { + return time.Time{}, err + } + return time.Parse(pattern, value) +} + +// Layout converts a strftime format specification +// to a Go time pattern specification. +func Layout(fmt string) (string, error) { + return layout(fmt, false) +} + +func layout(fmt string, parsing bool) (string, error) { + dst := buffer(fmt) + var parser parser + + parser.literal = func(b byte) error { + if '0' <= b && b <= '9' { + return literalErr(b) + } + dst = append(dst, b) + if b == 'M' || b == 'T' || b == 'm' || b == 'n' { + switch { + case bytes.HasSuffix(dst, []byte("Jan")): + return literalErr("Jan") + case bytes.HasSuffix(dst, []byte("Mon")): + return literalErr("Mon") + case bytes.HasSuffix(dst, []byte("MST")): + return literalErr("MST") + case bytes.HasSuffix(dst, []byte("PM")): + return literalErr("PM") + case bytes.HasSuffix(dst, []byte("pm")): + return literalErr("pm") + } + } + return nil + } + + parser.format = func(spec, flag byte) error { + if layout := goLayout(spec, flag, parsing); layout != "" { + dst = append(dst, layout...) + return nil + } + + switch spec { + default: + return formatError{} + + case 'L', 'f', 'N': + if bytes.HasSuffix(dst, []byte(".")) || bytes.HasSuffix(dst, []byte(",")) { + switch spec { + default: + dst = append(dst, "000"...) + case 'f': + dst = append(dst, "000000"...) + case 'N': + dst = append(dst, "000000000"...) + } + return nil + } + return formatError{message: "must follow '.' or ','"} + } + } + + if err := parser.parse(fmt); err != nil { + return "", err + } + return string(dst), nil +} + +// UTS35 converts a strftime format specification +// to a Unicode Technical Standard #35 Date Format Pattern. 
+func UTS35(fmt string) (string, error) { + const quote = '\'' + var quoted bool + dst := buffer(fmt) + + var parser parser + + parser.literal = func(b byte) error { + if b == quote { + dst = append(dst, quote, quote) + return nil + } + if !quoted && ('a' <= b && b <= 'z' || 'A' <= b && b <= 'Z') { + dst = append(dst, quote) + quoted = true + } + dst = append(dst, b) + return nil + } + + parser.format = func(spec, flag byte) error { + if quoted { + dst = append(dst, quote) + quoted = false + } + if pattern := uts35Pattern(spec, flag); pattern != "" { + dst = append(dst, pattern...) + return nil + } + return formatError{} + } + + if err := parser.parse(fmt); err != nil { + return "", err + } + if quoted { + dst = append(dst, quote) + } + return string(dst), nil +} + +func buffer(format string) (buf []byte) { + const bufSize = 64 + max := len(format) + 10 + if max < bufSize { + var b [bufSize]byte + buf = b[:0] + } else { + buf = make([]byte, 0, max) + } + return +} + +func year(y int) time.Time { + return time.Date(y, time.January, 1, 0, 0, 0, 0, time.UTC) +} + +func appendWeekNumber(dst []byte, t time.Time, flag byte, sunday bool) []byte { + offset := int(t.Weekday()) + if sunday { + offset = 6 - offset + } else if offset != 0 { + offset = 7 - offset + } + return appendInt2(dst, (t.YearDay()+offset)/7, flag) +} + +func append12Hour(dst []byte, t time.Time, flag byte) []byte { + h := t.Hour() + if h == 0 { + h = 12 + } else if h > 12 { + h -= 12 + } + return appendInt2(dst, h, flag) +} + +func appendInt1(dst []byte, i int) []byte { + return append(dst, byte('0'+i)) +} + +func appendInt2(dst []byte, i int, flag byte) []byte { + if flag == 0 || i >= 10 { + return append(dst, smallsString[i*2:i*2+2]...) + } + if flag == ' ' { + dst = append(dst, flag) + } + return appendInt1(dst, i) +} + +const smallsString = "" + + "00010203040506070809" + + "10111213141516171819" + + "20212223242526272829" + + "30313233343536373839" + + "40414243444546474849" + + "50515253545556575859" + + "60616263646566676869" + + "70717273747576777879" + + "80818283848586878889" + + "90919293949596979899" diff --git a/vendor/github.com/radareorg/r2pipe-go/LICENSE b/vendor/github.com/radareorg/r2pipe-go/LICENSE new file mode 100644 index 0000000..fd92b03 --- /dev/null +++ b/vendor/github.com/radareorg/r2pipe-go/LICENSE @@ -0,0 +1,23 @@ +The MIT License (MIT) + +Copyright (c) 2021 radare + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+ + diff --git a/vendor/github.com/radareorg/r2pipe-go/Makefile b/vendor/github.com/radareorg/r2pipe-go/Makefile new file mode 100644 index 0000000..5dd762a --- /dev/null +++ b/vendor/github.com/radareorg/r2pipe-go/Makefile @@ -0,0 +1,12 @@ +all: + go test + +ex: + cd example ; go run example.go + +sync: + git clone --depth=1 https://github.com/radareorg/radare2-r2pipe + cp -rf radare2-r2pipe/go/*.go . + rm -rf radare2-r2pipe + +.PHONY: all sync diff --git a/vendor/github.com/radareorg/r2pipe-go/README.md b/vendor/github.com/radareorg/r2pipe-go/README.md new file mode 100644 index 0000000..238a1bc --- /dev/null +++ b/vendor/github.com/radareorg/r2pipe-go/README.md @@ -0,0 +1,17 @@ +r2pipe.go +========= + +Go module to interact with radare2 + +Source +------ + +This repository is in sync with [radareorg/r2pipe](https://godoc.org/github.com/radareorg/radare2-r2pipe). + +Run `make sync` to update it after changing things in the source repository. + +Documentation +------------- + +[![GoDoc](https://godoc.org/github.com/radare/r2pipe-go?status.svg)](https://godoc.org/github.com/radare/r2pipe-go) + diff --git a/vendor/github.com/radareorg/r2pipe-go/api.go b/vendor/github.com/radareorg/r2pipe-go/api.go new file mode 100644 index 0000000..9ccf5ca --- /dev/null +++ b/vendor/github.com/radareorg/r2pipe-go/api.go @@ -0,0 +1,47 @@ +// radare - LGPL - Copyright 2021 - pancake + +package r2pipe + +// #cgo CFLAGS: -I/usr/local/include/libr +// #cgo CFLAGS: -I/usr/local/include/libr/sdb +// #cgo LDFLAGS: -L/usr/local/lib -lr_core +// #include +// #include +// extern void r_core_free(void *); +// extern void *r_core_new(void); +// extern char *r_core_cmd_str(void*, const char *); +// +import "C" + +import ( + "unsafe" +) + +func (r2p *Pipe) ApiCmd(cmd string) (string, error) { + res := C.r_core_cmd_str(r2p.Core, C.CString(cmd)) + return C.GoString(res), nil +} + +func (r2p *Pipe) ApiClose() error { + C.r_core_free(unsafe.Pointer(r2p.Core)) + r2p.Core = nil + return nil +} + +func NewApiPipe(file string) (*Pipe, error) { + r2 := C.r_core_new() + r2p := &Pipe{ + File: file, + Core: r2, + cmd: func(r2p *Pipe, cmd string) (string, error) { + return r2p.ApiCmd(cmd) + }, + close: func(r2p *Pipe) error { + return r2p.ApiClose() + }, + } + if file != "" { + r2p.ApiCmd("o " + file) + } + return r2p, nil +} diff --git a/vendor/github.com/radareorg/r2pipe-go/native.go b/vendor/github.com/radareorg/r2pipe-go/native.go new file mode 100644 index 0000000..2283717 --- /dev/null +++ b/vendor/github.com/radareorg/r2pipe-go/native.go @@ -0,0 +1,161 @@ +// radare - LGPL - Copyright 2017 - pancake + +package r2pipe + +//#cgo linux LDFLAGS: -ldl +//#include +//#include +// #include +// void *gor_core_new(void *f) { +// void *(*rcn)(); +// rcn = (void *(*)())f; +// return rcn(); +// } +// +// void gor_core_free(void *f, void *arg) { +// void (*fr)(void *); +// fr = (void (*)(void *))f; +// fr(arg); +// } +// +// char *gor_core_cmd_str(void *f, void *arg, char *arg2) { +// char *(*cmdstr)(void *, char *); +// cmdstr = (char *(*)(void *, char *))f; +// return cmdstr(arg, arg2); +// } +import "C" + +import ( + "errors" + "fmt" + "os" + "runtime" + "unsafe" +) + +type Ptr = unsafe.Pointer + +// *struct{} + +var ( + lib Ptr = nil + r_core_new func() Ptr + r_core_free func(Ptr) + r_core_cmd_str func(Ptr, string) string +) + +type DL struct { + handle unsafe.Pointer + name string +} + +func libpath(libname string) string { + paths := []string{"/usr/local/lib", "/usr/lib", "/lib"} + for _, path := range paths { + if _, err := 
os.Stat(path); errors.Is(err, os.ErrNotExist) {
+			// libname is not under this prefix, try the next one
+		} else {
+			// file exists
+			return path + "/" + libname
+		}
+	}
+	return libname
+}
+
+func dlOpen(path string) (*DL, error) {
+	var ret DL
+	switch runtime.GOOS {
+	case "darwin":
+		path = path + ".dylib"
+	case "windows":
+		path = path + ".dll"
+	default:
+		path = path + ".so" // linux/BSDs
+	}
+	path = libpath(path)
+	cpath := C.CString(path)
+	if cpath == nil {
+		return nil, errors.New("failed to allocate C string")
+	}
+	// r2pipe only uses flag 0
+	ret.handle = C.dlopen(cpath, 0)
+	ret.name = path
+	C.free(unsafe.Pointer(cpath))
+	if ret.handle == nil {
+		return nil, fmt.Errorf("failed to open %s", path)
+	}
+	return &ret, nil
+}
+
+func dlSym(dl *DL, name string) (unsafe.Pointer, error) {
+	cname := C.CString(name)
+	if cname == nil {
+		return nil, errors.New("failed to allocate C string")
+	}
+	handle := C.dlsym(dl.handle, cname)
+	C.free(unsafe.Pointer(cname))
+	if handle == nil {
+		return nil, fmt.Errorf("failed to resolve symbol %s", name)
+	}
+	return handle, nil
+}
+
+// NativeLoad loads libr_core via dlopen and binds the r_core_* entry points.
+// It is a no-op after the first successful call.
+func NativeLoad() error {
+	if lib != nil {
+		return nil
+	}
+	dl, err := dlOpen("libr_core")
+	if err != nil {
+		return err
+	}
+	// Cache the handle so the library is only loaded and bound once.
+	lib = dl.handle
+	handle1, _ := dlSym(dl, "r_core_new")
+	r_core_new = func() Ptr {
+		return Ptr(C.gor_core_new(handle1))
+	}
+	handle2, _ := dlSym(dl, "r_core_free")
+	r_core_free = func(p Ptr) {
+		C.gor_core_free(handle2, unsafe.Pointer(p))
+	}
+	handle3, _ := dlSym(dl, "r_core_cmd_str")
+	r_core_cmd_str = func(p Ptr, s string) string {
+		a := C.CString(s)
+		b := C.gor_core_cmd_str(handle3, unsafe.Pointer(p), a)
+		C.free(unsafe.Pointer(a))
+		return C.GoString(b)
+	}
+	return nil
+}
+
+func (r2p *Pipe) NativeCmd(cmd string) (string, error) {
+	res := r_core_cmd_str(r2p.Core, cmd)
+	return res, nil
+}
+
+func (r2p *Pipe) NativeClose() error {
+	r_core_free(r2p.Core)
+	r2p.Core = nil
+	return nil
+}
+
+func NewNativePipe(file string) (*Pipe, error) {
+	if err := NativeLoad(); err != nil {
+		return nil, err
+	}
+	r2 := r_core_new()
+	r2p := &Pipe{
+		File: file,
+		Core: r2,
+		cmd: func(r2p *Pipe, cmd string) (string, error) {
+			return r2p.NativeCmd(cmd)
+		},
+		close: func(r2p *Pipe) error {
+			return r2p.NativeClose()
+		},
+	}
+	if file != "" {
+		r2p.NativeCmd("o " + file)
+	}
+	return r2p, nil
+}
diff --git a/vendor/github.com/radareorg/r2pipe-go/r2pipe.go b/vendor/github.com/radareorg/r2pipe-go/r2pipe.go
new file mode 100644
index 0000000..d87e2fe
--- /dev/null
+++ b/vendor/github.com/radareorg/r2pipe-go/r2pipe.go
@@ -0,0 +1,243 @@
+// radare - LGPL - Copyright 2015 - nibble
+
+/*
+Package r2pipe allows calling r2 commands from Go. A simple hello world would
+look like the following snippet:
+
+	package main
+
+	import (
+		"fmt"
+
+		"github.com/radare/r2pipe-go"
+	)
+
+	func main() {
+		r2p, err := r2pipe.NewPipe("malloc://256")
+		if err != nil {
+			panic(err)
+		}
+		defer r2p.Close()
+
+		_, err = r2p.Cmd("w Hello World")
+		if err != nil {
+			panic(err)
+		}
+		buf, err := r2p.Cmd("ps")
+		if err != nil {
+			panic(err)
+		}
+		fmt.Println(buf)
+	}
+*/
+package r2pipe
+
+import (
+	"bufio"
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"os"
+	"os/exec"
+	"strconv"
+	"strings"
+	"unsafe"
+)
+
+// A Pipe represents a communication interface with r2 that will be used to
+// execute commands and obtain their results.
+type Pipe struct {
+	File   string
+	r2cmd  *exec.Cmd
+	stdin  io.WriteCloser
+	stdout io.ReadCloser
+	stderr io.ReadCloser
+	Core   unsafe.Pointer
+	cmd    CmdDelegate
+	close  CloseDelegate
+}
+
+type (
+	CmdDelegate   func(*Pipe, string) (string, error)
+	CloseDelegate func(*Pipe) error
+	EventDelegate func(*Pipe, string, interface{}, string) bool
+)
+
+// NewPipe returns a new r2 pipe and initializes an r2 core that will try to
+// load the provided file or URI. If file is an empty string, the environment
+// variables R2PIPE_{IN,OUT} are used as file descriptors for input and output;
+// this is the case when r2pipe is called from within r2.
+func NewPipe(file string) (*Pipe, error) {
+	if file == "" {
+		return newPipeFd()
+	}
+
+	return newPipeCmd(file)
+}
+
+func newPipeFd() (*Pipe, error) {
+	r2pipeIn := os.Getenv("R2PIPE_IN")
+	r2pipeOut := os.Getenv("R2PIPE_OUT")
+
+	if r2pipeIn == "" || r2pipeOut == "" {
+		return nil, fmt.Errorf("missing R2PIPE_{IN,OUT} vars")
+	}
+
+	r2pipeInFd, err := strconv.Atoi(r2pipeIn)
+	if err != nil {
+		return nil, fmt.Errorf("failed to convert R2PIPE_IN into a file descriptor")
+	}
+
+	r2pipeOutFd, err := strconv.Atoi(r2pipeOut)
+	if err != nil {
+		return nil, fmt.Errorf("failed to convert R2PIPE_OUT into a file descriptor")
+	}
+
+	stdout := os.NewFile(uintptr(r2pipeInFd), "R2PIPE_IN")
+	stdin := os.NewFile(uintptr(r2pipeOutFd), "R2PIPE_OUT")
+
+	r2p := &Pipe{
+		File:   "",
+		r2cmd:  nil,
+		stdin:  stdin,
+		stdout: stdout,
+		Core:   nil,
+	}
+
+	return r2p, nil
+}
+
+func newPipeCmd(file string) (*Pipe, error) {
+	r2p := &Pipe{File: file, r2cmd: exec.Command("radare2", "-q0", file)}
+
+	var err error
+	r2p.stdin, err = r2p.r2cmd.StdinPipe()
+	if err == nil {
+		r2p.stdout, err = r2p.r2cmd.StdoutPipe()
+	}
+	if err == nil {
+		r2p.stderr, err = r2p.r2cmd.StderrPipe()
+	}
+	if err == nil {
+		err = r2p.r2cmd.Start()
+	}
+	if err == nil {
+		// Consume the initial output r2 sends on startup, up to the first NUL byte.
+		_, err = bufio.NewReader(r2p.stdout).ReadString('\x00')
+	}
+	return r2p, err
+}
+
+// Write implements the standard Write interface: it writes data to the r2
+// pipe, blocking until r2 has consumed all the data.
+func (r2p *Pipe) Write(p []byte) (n int, err error) {
+	return r2p.stdin.Write(p)
+}
+
+// Read implements the standard Read interface: it reads data from the r2
+// pipe's stdout, blocking until the previously issued commands have finished.
+func (r2p *Pipe) Read(p []byte) (n int, err error) {
+	return r2p.stdout.Read(p)
+}
+
+// ReadErr reads data from the r2 pipe's stderr.
+func (r2p *Pipe) ReadErr(p []byte) (n int, err error) {
+	return r2p.stderr.Read(p)
+}
+
+// On watches the file returned by the r2 "===stderr" command and keeps
+// invoking cb with its contents until cb returns false.
+func (r2p *Pipe) On(evname string, p interface{}, cb EventDelegate) error {
+	path, err := r2p.Cmd("===stderr")
+	if err != nil {
+		return err
+	}
+	f, err := os.OpenFile(path, os.O_RDONLY, 0600)
+	if err != nil {
+		return err
+	}
+	go func() {
+		var buf bytes.Buffer
+		for {
+			io.Copy(&buf, f)
+			if buf.Len() > 0 {
+				if !cb(r2p, evname, p, buf.String()) {
+					break
+				}
+			}
+		}
+		f.Close()
+	}()
+	return nil
+}
+
+// Cmd is a helper that runs r2 commands and returns their output.
+func (r2p *Pipe) Cmd(cmd string) (string, error) {
+	if r2p.Core != nil {
+		if r2p.cmd != nil {
+			return r2p.cmd(r2p, cmd)
+		}
+
+		return "", nil
+	}
+
+	if _, err := fmt.Fprintln(r2p, cmd); err != nil {
+		return "", err
+	}
+
+	buf, err := bufio.NewReader(r2p).ReadString('\x00')
+	if err != nil {
+		return "", err
+	}
+	return strings.TrimRight(buf, "\n\x00"), nil
+}
+
+// Cmdf is like Cmd but formats the command with fmt.Sprintf before running it.
+func (r2p *Pipe) Cmdf(f string, args ...interface{}) (string, error) {
+	return r2p.Cmd(fmt.Sprintf(f, args...))
+}
+
+// Cmdj acts like Cmd but interprets the output of the command as JSON. It
+// returns the parsed JSON keys and values.
+func (r2p *Pipe) Cmdj(cmd string) (out interface{}, err error) {
+	rstr, err := r2p.Cmd(cmd)
+	if err == nil {
+		err = json.Unmarshal([]byte(rstr), &out)
+	}
+	return out, err
+}
+
+// Cmdjf is like Cmdj but formats the command with fmt.Sprintf before running it.
+func (r2p *Pipe) Cmdjf(f string, args ...interface{}) (interface{}, error) {
+	return r2p.Cmdj(fmt.Sprintf(f, args...))
+}
+
+// Close shuts down r2, closing the created pipe.
+func (r2p *Pipe) Close() error {
+	if r2p.close != nil {
+		return r2p.close(r2p)
+	}
+
+	if r2p.File == "" {
+		return nil
+	}
+
+	if _, err := r2p.Cmd("q"); err != nil {
+		return err
+	}
+
+	return r2p.r2cmd.Wait()
+}
+
+// ForceClose forces shutdown of r2 ("q!"), closing the created pipe.
+func (r2p *Pipe) ForceClose() error {
+	if r2p.close != nil {
+		return r2p.close(r2p)
+	}
+
+	if r2p.File == "" {
+		return nil
+	}
+
+	if _, err := r2p.Cmd("q!"); err != nil {
+		return err
+	}
+
+	return r2p.r2cmd.Wait()
+}
diff --git a/vendor/github.com/remyoudompheng/bigfft/LICENSE b/vendor/github.com/remyoudompheng/bigfft/LICENSE
new file mode 100644
index 0000000..7448756
--- /dev/null
+++ b/vendor/github.com/remyoudompheng/bigfft/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
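A minimal sketch of how the Cmd and Cmdj helpers from r2pipe.go above are typically driven. This is not part of the vendored patch: it assumes radare2 is installed and on PATH, imports the package by its upstream path rather than the vendored one, and the sample commands ("e asm.arch", "ij") are only illustrative radare2 commands.

package main

import (
	"fmt"

	r2pipe "github.com/radareorg/r2pipe-go"
)

func main() {
	// Spawn radare2 against a throwaway 64-byte malloc:// file.
	r2p, err := r2pipe.NewPipe("malloc://64")
	if err != nil {
		panic(err)
	}
	defer r2p.Close()

	// Cmd returns the raw command output as a string.
	arch, err := r2p.Cmd("e asm.arch")
	if err != nil {
		panic(err)
	}
	fmt.Println("asm.arch:", arch)

	// Cmdj runs a JSON-emitting command ("ij" = file info) and
	// returns the decoded value as an interface{}.
	info, err := r2p.Cmdj("ij")
	if err != nil {
		panic(err)
	}
	fmt.Printf("%v\n", info)
}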
diff --git a/vendor/github.com/remyoudompheng/bigfft/README b/vendor/github.com/remyoudompheng/bigfft/README new file mode 100644 index 0000000..0fcd39d --- /dev/null +++ b/vendor/github.com/remyoudompheng/bigfft/README @@ -0,0 +1,54 @@ +This library is a toy proof-of-concept implementation of the +well-known Schonhage-Strassen method for multiplying integers. +It is not expected to have a real life usecase outside number +theory computations, nor is it expected to be used in any production +system. + +If you are using it in your project, you may want to carefully +examine the actual requirement or problem you are trying to solve. + +# Comparison with the standard library and GMP + +Benchmarking math/big vs. bigfft + +Number size old ns/op new ns/op delta + 1kb 1599 1640 +2.56% + 10kb 61533 62170 +1.04% + 50kb 833693 831051 -0.32% +100kb 2567995 2693864 +4.90% + 1Mb 105237800 28446400 -72.97% + 5Mb 1272947000 168554600 -86.76% + 10Mb 3834354000 405120200 -89.43% + 20Mb 11514488000 845081600 -92.66% + 50Mb 49199945000 2893950000 -94.12% +100Mb 147599836000 5921594000 -95.99% + +Benchmarking GMP vs bigfft + +Number size GMP ns/op Go ns/op delta + 1kb 536 1500 +179.85% + 10kb 26669 50777 +90.40% + 50kb 252270 658534 +161.04% +100kb 686813 2127534 +209.77% + 1Mb 12100000 22391830 +85.06% + 5Mb 111731843 133550600 +19.53% + 10Mb 212314000 318595800 +50.06% + 20Mb 490196000 671512800 +36.99% + 50Mb 1280000000 2451476000 +91.52% +100Mb 2673000000 5228991000 +95.62% + +Benchmarks were run on a Core 2 Quad Q8200 (2.33GHz). +FFT is enabled when input numbers are over 200kbits. + +Scanning large decimal number from strings. +(math/big [n^2 complexity] vs bigfft [n^1.6 complexity], Core i5-4590) + +Digits old ns/op new ns/op delta +1e3 9995 10876 +8.81% +1e4 175356 243806 +39.03% +1e5 9427422 6780545 -28.08% +1e6 1776707489 144867502 -91.85% +2e6 6865499995 346540778 -94.95% +5e6 42641034189 1069878799 -97.49% +10e6 151975273589 2693328580 -98.23% + diff --git a/vendor/github.com/remyoudompheng/bigfft/arith_decl.go b/vendor/github.com/remyoudompheng/bigfft/arith_decl.go new file mode 100644 index 0000000..96937df --- /dev/null +++ b/vendor/github.com/remyoudompheng/bigfft/arith_decl.go @@ -0,0 +1,33 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bigfft + +import ( + "math/big" + _ "unsafe" +) + +type Word = big.Word + +//go:linkname addVV math/big.addVV +func addVV(z, x, y []Word) (c Word) + +//go:linkname subVV math/big.subVV +func subVV(z, x, y []Word) (c Word) + +//go:linkname addVW math/big.addVW +func addVW(z, x []Word, y Word) (c Word) + +//go:linkname subVW math/big.subVW +func subVW(z, x []Word, y Word) (c Word) + +//go:linkname shlVU math/big.shlVU +func shlVU(z, x []Word, s uint) (c Word) + +//go:linkname mulAddVWW math/big.mulAddVWW +func mulAddVWW(z, x []Word, y, r Word) (c Word) + +//go:linkname addMulVVW math/big.addMulVVW +func addMulVVW(z, x []Word, y Word) (c Word) diff --git a/vendor/github.com/remyoudompheng/bigfft/fermat.go b/vendor/github.com/remyoudompheng/bigfft/fermat.go new file mode 100644 index 0000000..200ee57 --- /dev/null +++ b/vendor/github.com/remyoudompheng/bigfft/fermat.go @@ -0,0 +1,216 @@ +package bigfft + +import ( + "math/big" +) + +// Arithmetic modulo 2^n+1. + +// A fermat of length w+1 represents a number modulo 2^(w*_W) + 1. The last +// word is zero or one. 
A number has at most two representatives satisfying the +// 0-1 last word constraint. +type fermat nat + +func (n fermat) String() string { return nat(n).String() } + +func (z fermat) norm() { + n := len(z) - 1 + c := z[n] + if c == 0 { + return + } + if z[0] >= c { + z[n] = 0 + z[0] -= c + return + } + // z[0] < z[n]. + subVW(z, z, c) // Substract c + if c > 1 { + z[n] -= c - 1 + c = 1 + } + // Add back c. + if z[n] == 1 { + z[n] = 0 + return + } else { + addVW(z, z, 1) + } +} + +// Shift computes (x << k) mod (2^n+1). +func (z fermat) Shift(x fermat, k int) { + if len(z) != len(x) { + panic("len(z) != len(x) in Shift") + } + n := len(x) - 1 + // Shift by n*_W is taking the opposite. + k %= 2 * n * _W + if k < 0 { + k += 2 * n * _W + } + neg := false + if k >= n*_W { + k -= n * _W + neg = true + } + + kw, kb := k/_W, k%_W + + z[n] = 1 // Add (-1) + if !neg { + for i := 0; i < kw; i++ { + z[i] = 0 + } + // Shift left by kw words. + // x = a·2^(n-k) + b + // x< 0 { + z[kw+1] -= b + } else { + subVW(z[kw+1:], z[kw+1:], b) + } + } else { + for i := kw + 1; i < n; i++ { + z[i] = 0 + } + // Shift left and negate, by kw words. + copy(z[:kw+1], x[n-kw:n+1]) // z_low = x_high + b := subVV(z[kw:n], z[kw:n], x[:n-kw]) // z_high -= x_low + z[n] -= b + } + // Add back 1. + if z[n] > 0 { + z[n]-- + } else if z[0] < ^big.Word(0) { + z[0]++ + } else { + addVW(z, z, 1) + } + // Shift left by kb bits + shlVU(z, z, uint(kb)) + z.norm() +} + +// ShiftHalf shifts x by k/2 bits the left. Shifting by 1/2 bit +// is multiplication by sqrt(2) mod 2^n+1 which is 2^(3n/4) - 2^(n/4). +// A temporary buffer must be provided in tmp. +func (z fermat) ShiftHalf(x fermat, k int, tmp fermat) { + n := len(z) - 1 + if k%2 == 0 { + z.Shift(x, k/2) + return + } + u := (k - 1) / 2 + a := u + (3*_W/4)*n + b := u + (_W/4)*n + z.Shift(x, a) + tmp.Shift(x, b) + z.Sub(z, tmp) +} + +// Add computes addition mod 2^n+1. +func (z fermat) Add(x, y fermat) fermat { + if len(z) != len(x) { + panic("Add: len(z) != len(x)") + } + addVV(z, x, y) // there cannot be a carry here. + z.norm() + return z +} + +// Sub computes substraction mod 2^n+1. +func (z fermat) Sub(x, y fermat) fermat { + if len(z) != len(x) { + panic("Add: len(z) != len(x)") + } + n := len(y) - 1 + b := subVV(z[:n], x[:n], y[:n]) + b += y[n] + // If b > 0, we need to subtract b< 2*n+1 { + panic("len(z) > 2n+1") + } + // We now have + // z = z[:n] + 1<<(n*W) * z[n:2n+1] + // which normalizes to: + // z = z[:n] - z[n:2n] + z[2n] + c1 := big.Word(0) + if len(z) > 2*n { + c1 = addVW(z[:n], z[:n], z[2*n]) + } + c2 := big.Word(0) + if len(z) >= 2*n { + c2 = subVV(z[:n], z[:n], z[n:2*n]) + } else { + m := len(z) - n + c2 = subVV(z[:m], z[:m], z[n:]) + c2 = subVW(z[m:n], z[m:n], c2) + } + // Restore carries. + // Substracting z[n] -= c2 is the same + // as z[0] += c2 + z = z[:n+1] + z[n] = c1 + c := addVW(z, z, c2) + if c != 0 { + panic("impossible") + } + z.norm() + return z +} + +// copied from math/big +// +// basicMul multiplies x and y and leaves the result in z. +// The (non-normalized) result is placed in z[0 : len(x) + len(y)]. 
+func basicMul(z, x, y fermat) { + // initialize z + for i := 0; i < len(z); i++ { + z[i] = 0 + } + for i, d := range y { + if d != 0 { + z[len(x)+i] = addMulVVW(z[i:i+len(x)], x, d) + } + } +} diff --git a/vendor/github.com/remyoudompheng/bigfft/fft.go b/vendor/github.com/remyoudompheng/bigfft/fft.go new file mode 100644 index 0000000..2d4c1e7 --- /dev/null +++ b/vendor/github.com/remyoudompheng/bigfft/fft.go @@ -0,0 +1,370 @@ +// Package bigfft implements multiplication of big.Int using FFT. +// +// The implementation is based on the Schönhage-Strassen method +// using integer FFT modulo 2^n+1. +package bigfft + +import ( + "math/big" + "unsafe" +) + +const _W = int(unsafe.Sizeof(big.Word(0)) * 8) + +type nat []big.Word + +func (n nat) String() string { + v := new(big.Int) + v.SetBits(n) + return v.String() +} + +// fftThreshold is the size (in words) above which FFT is used over +// Karatsuba from math/big. +// +// TestCalibrate seems to indicate a threshold of 60kbits on 32-bit +// arches and 110kbits on 64-bit arches. +var fftThreshold = 1800 + +// Mul computes the product x*y and returns z. +// It can be used instead of the Mul method of +// *big.Int from math/big package. +func Mul(x, y *big.Int) *big.Int { + xwords := len(x.Bits()) + ywords := len(y.Bits()) + if xwords > fftThreshold && ywords > fftThreshold { + return mulFFT(x, y) + } + return new(big.Int).Mul(x, y) +} + +func mulFFT(x, y *big.Int) *big.Int { + var xb, yb nat = x.Bits(), y.Bits() + zb := fftmul(xb, yb) + z := new(big.Int) + z.SetBits(zb) + if x.Sign()*y.Sign() < 0 { + z.Neg(z) + } + return z +} + +// A FFT size of K=1< bits { + k = uint(i) + break + } + } + // The 1< words + m = words>>k + 1 + return +} + +// valueSize returns the length (in words) to use for polynomial +// coefficients, to compute a correct product of polynomials P*Q +// where deg(P*Q) < K (== 1<= 2*m*W+K + n := 2*m*_W + int(k) // necessary bits + K := 1 << (k - extra) + if K < _W { + K = _W + } + n = ((n / K) + 1) * K // round to a multiple of K + return n / _W +} + +// poly represents an integer via a polynomial in Z[x]/(x^K+1) +// where K is the FFT length and b^m is the computation basis 1<<(m*_W). +// If P = a[0] + a[1] x + ... a[n] x^(K-1), the associated natural number +// is P(b^m). +type poly struct { + k uint // k is such that K = 1< 0 { + length += len(p.a[na-1]) + } + n := make(nat, length) + m := p.m + np := n + for i := range p.a { + l := len(p.a[i]) + c := addVV(np[:l], np[:l], p.a[i]) + if np[l] < ^big.Word(0) { + np[l] += c + } else { + addVW(np[l:], np[l:], c) + } + np = np[m:] + } + n = trim(n) + return n +} + +func trim(n nat) nat { + for i := range n { + if n[len(n)-1-i] != 0 { + return n[:len(n)-i] + } + } + return nil +} + +// Mul multiplies p and q modulo X^K-1, where K = 1<= 1<= 1<> k + // p(x) = a_0 + a_1 x + ... + a_{K-1} x^(K-1) + // p(θx) = q(x) where + // q(x) = a_0 + θa_1 x + ... + θ^(K-1) a_{K-1} x^(K-1) + // + // Twist p by θ to obtain q. + tbits := make([]big.Word, (n+1)<> k + + // Perform an inverse Fourier transform to recover q. + qbits := make([]big.Word, (n+1)<> size + if backward { + ω2shift = -ω2shift + } + + // Easy cases. 
+ if len(src[0]) != n+1 || len(dst[0]) != n+1 { + panic("len(src[0]) != n+1 || len(dst[0]) != n+1") + } + switch size { + case 0: + copy(dst[0], src[0]) + return + case 1: + dst[0].Add(src[0], src[1< quadraticScanThreshold; n /= 2 { + pow++ + } + // threshold * 2^(pow-1) <= size < threshold * 2^pow + return quadraticScanThreshold << (pow - 1), s.power(pow - 1) +} + +func (s *scanner) power(k uint) *big.Int { + for i := len(s.powers); i <= int(k); i++ { + z := new(big.Int) + if i == 0 { + if quadraticScanThreshold%14 != 0 { + panic("quadraticScanThreshold % 14 != 0") + } + z.Exp(big.NewInt(1e14), big.NewInt(quadraticScanThreshold/14), nil) + } else { + z.Mul(s.powers[i-1], s.powers[i-1]) + } + s.powers = append(s.powers, z) + } + return s.powers[k] +} + +func (s *scanner) scan(z *big.Int, str string) { + if len(str) <= quadraticScanThreshold { + z.SetString(str, 10) + return + } + sz, pow := s.chunkSize(len(str)) + // Scan the left half. + s.scan(z, str[:len(str)-sz]) + // FIXME: reuse temporaries. + left := Mul(z, pow) + // Scan the right half + s.scan(z, str[len(str)-sz:]) + z.Add(z, left) +} + +// quadraticScanThreshold is the number of digits +// below which big.Int.SetString is more efficient +// than subquadratic algorithms. +// 1232 digits fit in 4096 bits. +const quadraticScanThreshold = 1232 -- cgit 1.4.1
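A minimal sketch of how the Mul entry point from bigfft/fft.go above might be called; not part of the vendored patch. It assumes the package is importable at github.com/remyoudompheng/bigfft, and the operand sizes are chosen to exceed fftThreshold (1800 words) so the FFT path is actually exercised.

package main

import (
	"fmt"
	"math/big"

	"github.com/remyoudompheng/bigfft"
)

func main() {
	// Two operands of roughly one million bits each, well above the
	// FFT threshold, so Mul takes the Schönhage-Strassen path.
	x := new(big.Int).Lsh(big.NewInt(1), 1_000_000)
	y := new(big.Int).Sub(x, big.NewInt(1))

	z := bigfft.Mul(x, y) // same result as new(big.Int).Mul(x, y)
	fmt.Println(z.BitLen())
}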