chore: bumps github.com/open-policy-agent/opa

This commit is contained in:
Michal Klos
2026-01-14 04:28:56 +01:00
parent 80de57468b
commit c502418b11
428 changed files with 75797 additions and 4624 deletions

View File

@@ -3,6 +3,9 @@ IMPORT := ($OCIS_REPO)/$(NAME)
BIN := bin
DIST := dist
# Enable automatic toolchain downloads to match go.mod requirements
export GOTOOLCHAIN := auto
ifeq ($(OS), Windows_NT)
EXECUTABLE := $(NAME).exe
UNAME := Windows

26
go.mod
View File

@@ -1,8 +1,8 @@
module github.com/owncloud/ocis/v2
go 1.24.0
go 1.24.6
toolchain go1.24.4
toolchain go1.24.9
require (
dario.cat/mergo v1.0.2
@@ -63,7 +63,7 @@ require (
github.com/onsi/ginkgo v1.16.5
github.com/onsi/ginkgo/v2 v2.27.2
github.com/onsi/gomega v1.38.2
github.com/open-policy-agent/opa v1.6.0
github.com/open-policy-agent/opa v1.10.1
github.com/orcaman/concurrent-map v1.0.0
github.com/owncloud/libre-graph-api-go v1.0.5-0.20251107084958-31937a4ea3f1
github.com/owncloud/reva/v2 v2.0.0-20251107154850-a122a9538794
@@ -75,9 +75,9 @@ require (
github.com/rs/cors v1.11.1
github.com/rs/zerolog v1.34.0
github.com/shamaton/msgpack/v2 v2.3.1
github.com/sirupsen/logrus v1.9.3
github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af
github.com/spf13/afero v1.15.0
github.com/spf13/cobra v1.9.1
github.com/spf13/cobra v1.10.1
github.com/stretchr/testify v1.11.1
github.com/test-go/testify v1.1.4
github.com/thejerf/suture/v4 v4.0.6
@@ -167,6 +167,7 @@ require (
github.com/cyphar/filepath-securejoin v0.2.5 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/deckarep/golang-set v1.8.0 // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect
github.com/desertbit/timer v1.0.1 // indirect
github.com/dgraph-io/ristretto v0.2.0 // indirect
github.com/dgryski/go-farm v0.0.0-20240924180020-3414d57e47da // indirect
@@ -243,6 +244,14 @@ require (
github.com/klauspost/cpuid/v2 v2.2.11 // indirect
github.com/kovidgoyal/go-parallel v1.1.1 // indirect
github.com/leodido/go-urn v1.4.0 // indirect
github.com/lestrrat-go/blackmagic v1.0.4 // indirect
github.com/lestrrat-go/dsig v1.0.0 // indirect
github.com/lestrrat-go/dsig-secp256k1 v1.0.0 // indirect
github.com/lestrrat-go/httpcc v1.0.1 // indirect
github.com/lestrrat-go/httprc/v3 v3.0.1 // indirect
github.com/lestrrat-go/jwx/v3 v3.0.11 // indirect
github.com/lestrrat-go/option v1.0.1 // indirect
github.com/lestrrat-go/option/v2 v2.0.0 // indirect
github.com/libregraph/oidc-go v1.1.0 // indirect
github.com/longsleep/go-metrics v1.0.0 // indirect
github.com/longsleep/rndm v1.2.0 // indirect
@@ -293,6 +302,7 @@ require (
github.com/russellhaering/goxmldsig v1.5.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd // indirect
github.com/segmentio/asm v1.2.0 // indirect
github.com/segmentio/kafka-go v0.4.49 // indirect
github.com/segmentio/ksuid v1.0.4 // indirect
github.com/sercand/kuberesolver/v5 v5.1.1 // indirect
@@ -302,7 +312,7 @@ require (
github.com/shurcooL/vfsgen v0.0.0-20230704071429-0000e147ea92 // indirect
github.com/skeema/knownhosts v1.3.0 // indirect
github.com/spacewander/go-suffix-tree v0.0.0-20191010040751-0865e368c784 // indirect
github.com/spf13/pflag v1.0.6 // indirect
github.com/spf13/pflag v1.0.10 // indirect
github.com/stretchr/objx v0.5.2 // indirect
github.com/studio-b12/gowebdav v0.9.0 // indirect
github.com/tchap/go-patricia/v2 v2.3.3 // indirect
@@ -311,6 +321,7 @@ require (
github.com/tinylib/msgp v1.3.0 // indirect
github.com/toorop/go-dkim v0.0.0-20201103131630-e1cd1a0a5208 // indirect
github.com/trustelem/zxcvbn v1.0.1 // indirect
github.com/valyala/fastjson v1.6.4 // indirect
github.com/vektah/gqlparser/v2 v2.5.30 // indirect
github.com/wk8/go-ordered-map v1.0.0 // indirect
github.com/xanzy/ssh-agent v0.3.3 // indirect
@@ -324,7 +335,6 @@ require (
go.opencensus.io v0.24.0 // indirect
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.37.0 // indirect
go.opentelemetry.io/otel/metric v1.38.0 // indirect
go.opentelemetry.io/proto/otlp v1.7.1 // indirect
go.uber.org/multierr v1.11.0 // indirect
@@ -341,7 +351,7 @@ require (
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
gopkg.in/warnings.v0 v0.1.2 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
sigs.k8s.io/yaml v1.5.0 // indirect
sigs.k8s.io/yaml v1.6.0 // indirect
)
replace github.com/studio-b12/gowebdav => github.com/kobergj/gowebdav v0.0.0-20251030165916-532350997dde

55
go.sum
View File

@@ -163,8 +163,8 @@ github.com/bombsimon/logrusr/v3 v3.1.0 h1:zORbLM943D+hDMGgyjMhSAz/iDz86ZV72qaak/
github.com/bombsimon/logrusr/v3 v3.1.0/go.mod h1:PksPPgSFEL2I52pla2glgCyyd2OqOHAnFF5E+g8Ixco=
github.com/bufbuild/protocompile v0.14.1 h1:iA73zAf/fyljNjQKwYzUHD6AD4R8KMasmwa/FBatYVw=
github.com/bufbuild/protocompile v0.14.1/go.mod h1:ppVdAIhbr2H8asPk6k4pY7t9zB1OU5DoEw9xY/FUi1c=
github.com/bytecodealliance/wasmtime-go/v3 v3.0.2 h1:3uZCA/BLTIu+DqCfguByNMJa2HVHpXvjfy0Dy7g6fuA=
github.com/bytecodealliance/wasmtime-go/v3 v3.0.2/go.mod h1:RnUjnIXxEJcL6BgCvNyzCCRzZcxCgsZCi+RNlvYor5Q=
github.com/bytecodealliance/wasmtime-go/v37 v37.0.0 h1:DPjdn2V3JhXHMoZ2ymRqGK+y1bDyr9wgpyYCvhjMky8=
github.com/bytecodealliance/wasmtime-go/v37 v37.0.0/go.mod h1:Pf1l2JCTUFMnOqDIwkjzx1qfVJ09xbaXETKgRVE4jZ0=
github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4=
github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM=
@@ -212,11 +212,13 @@ github.com/davidbyttow/govips/v2 v2.16.0 h1:1nH/Rbx8qZP1hd+oYL9fYQjAnm1+KorX9s07
github.com/davidbyttow/govips/v2 v2.16.0/go.mod h1:clH5/IDVmG5eVyc23qYpyi7kmOT0B/1QNTKtci4RkyM=
github.com/deckarep/golang-set v1.8.0 h1:sk9/l/KqpunDwP7pSjUg0keiOOLEnOBHzykLrsPppp4=
github.com/deckarep/golang-set v1.8.0/go.mod h1:5nI87KwE7wgsBU1F4GKAw2Qod7p5kyS383rP6+o6qqo=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 h1:NMZiJj8QnKe1LgsbDayM4UoHwbvwDRwnI3hwNaAHRnc=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40=
github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f/go.mod h1:xH/i4TFMt8koVQZ6WFms69WAsDWr2XsYL3Hkl7jkoLE=
github.com/desertbit/timer v1.0.1 h1:yRpYNn5Vaaj6QXecdLMPMJsW81JLiI1eokUft5nBmeo=
github.com/desertbit/timer v1.0.1/go.mod h1:htRrYeY5V/t4iu1xCJ5XsQvp4xve8QulXXctAzxqcwE=
github.com/dgraph-io/badger/v4 v4.7.0 h1:Q+J8HApYAY7UMpL8d9owqiB+odzEc0zn/aqOD9jhc6Y=
github.com/dgraph-io/badger/v4 v4.7.0/go.mod h1:He7TzG3YBy3j4f5baj5B7Zl2XyfNe5bl4Udl0aPemVA=
github.com/dgraph-io/badger/v4 v4.8.0 h1:JYph1ChBijCw8SLeybvPINizbDKWZ5n/GYbz2yhN/bs=
github.com/dgraph-io/badger/v4 v4.8.0/go.mod h1:U6on6e8k/RTbUWxqKR0MvugJuVmkxSNc79ap4917h4w=
github.com/dgraph-io/ristretto v0.2.0 h1:XAfl+7cmoUDWW/2Lx8TGZQjjxIQ2Ley9DSf52dru4WE=
github.com/dgraph-io/ristretto v0.2.0/go.mod h1:8uBHCU/PBV4Ag0CJrP47b9Ofby5dqWNh4FicAdoqFNU=
github.com/dgraph-io/ristretto/v2 v2.2.0 h1:bkY3XzJcXoMuELV8F+vS8kzNgicwQFAaGINAEJdWGOM=
@@ -607,6 +609,22 @@ github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ=
github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI=
github.com/leonelquinteros/gotext v1.7.2 h1:bDPndU8nt+/kRo1m4l/1OXiiy2v7Z7dfPQ9+YP7G1Mc=
github.com/leonelquinteros/gotext v1.7.2/go.mod h1:9/haCkm5P7Jay1sxKDGJ5WIg4zkz8oZKw4ekNpALob8=
github.com/lestrrat-go/blackmagic v1.0.4 h1:IwQibdnf8l2KoO+qC3uT4OaTWsW7tuRQXy9TRN9QanA=
github.com/lestrrat-go/blackmagic v1.0.4/go.mod h1:6AWFyKNNj0zEXQYfTMPfZrAXUWUfTIZ5ECEUEJaijtw=
github.com/lestrrat-go/dsig v1.0.0 h1:OE09s2r9Z81kxzJYRn07TFM9XA4akrUdoMwr0L8xj38=
github.com/lestrrat-go/dsig v1.0.0/go.mod h1:dEgoOYYEJvW6XGbLasr8TFcAxoWrKlbQvmJgCR0qkDo=
github.com/lestrrat-go/dsig-secp256k1 v1.0.0 h1:JpDe4Aybfl0soBvoVwjqDbp+9S1Y2OM7gcrVVMFPOzY=
github.com/lestrrat-go/dsig-secp256k1 v1.0.0/go.mod h1:CxUgAhssb8FToqbL8NjSPoGQlnO4w3LG1P0qPWQm/NU=
github.com/lestrrat-go/httpcc v1.0.1 h1:ydWCStUeJLkpYyjLDHihupbn2tYmZ7m22BGkcvZZrIE=
github.com/lestrrat-go/httpcc v1.0.1/go.mod h1:qiltp3Mt56+55GPVCbTdM9MlqhvzyuL6W/NMDA8vA5E=
github.com/lestrrat-go/httprc/v3 v3.0.1 h1:3n7Es68YYGZb2Jf+k//llA4FTZMl3yCwIjFIk4ubevI=
github.com/lestrrat-go/httprc/v3 v3.0.1/go.mod h1:2uAvmbXE4Xq8kAUjVrZOq1tZVYYYs5iP62Cmtru00xk=
github.com/lestrrat-go/jwx/v3 v3.0.11 h1:yEeUGNUuNjcez/Voxvr7XPTYNraSQTENJgtVTfwvG/w=
github.com/lestrrat-go/jwx/v3 v3.0.11/go.mod h1:XSOAh2SiXm0QgRe3DulLZLyt+wUuEdFo81zuKTLcvgQ=
github.com/lestrrat-go/option v1.0.1 h1:oAzP2fvZGQKWkvHa1/SAcFolBEca1oN+mQ7eooNBEYU=
github.com/lestrrat-go/option v1.0.1/go.mod h1:5ZHFbivi4xwXxhxY9XHDe2FHo6/Z7WWmtT7T5nBBp3I=
github.com/lestrrat-go/option/v2 v2.0.0 h1:XxrcaJESE1fokHy3FpaQ/cXW8ZsIdWcdFzzLOcID3Ss=
github.com/lestrrat-go/option/v2 v2.0.0/go.mod h1:oSySsmzMoR0iRzCDCaUfsCzxQHUEuhOViQObyy7S6Vg=
github.com/libregraph/idm v0.5.0 h1:tDMwKbAOZzdeDYMxVlY5PbSqRKO7dbAW9KT42A51WSk=
github.com/libregraph/idm v0.5.0/go.mod h1:BGMwIQ/6orJSPVzJ1x6kgG2JyG9GY05YFmbsnaD80k0=
github.com/libregraph/lico v0.66.0 h1:7T6fD1YF0Ep9n0g4KN6dvWHTlDC3awrQpgsP5GdYCF4=
@@ -710,8 +728,8 @@ github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7J
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A=
github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k=
github.com/open-policy-agent/opa v1.6.0 h1:/S/cnNQJ2MUMNzizHPbisTWBHowmLkPrugY5jjkPlRQ=
github.com/open-policy-agent/opa v1.6.0/go.mod h1:zFmw4P+W62+CWGYRDDswfVYSCnPo6oYaktQnfIaRFC4=
github.com/open-policy-agent/opa v1.10.1 h1:haIvxZSPky8HLjRrvQwWAjCPLg8JDFSZMbbG4yyUHgY=
github.com/open-policy-agent/opa v1.10.1/go.mod h1:7uPI3iRpOalJ0BhK6s1JALWPU9HvaV1XeBSSMZnr/PM=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
@@ -810,6 +828,8 @@ github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd h1:CmH9+J6ZSsIjUK3dcGsnCnO41eRBOnY12zwkn5qVwgc=
github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd/go.mod h1:hPqNNc0+uJM6H+SuU8sEs5K5IQeKccPqeSjfgcKGgPk=
github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys=
github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs=
github.com/segmentio/kafka-go v0.4.49 h1:GJiNX1d/g+kG6ljyJEoi9++PUMdXGAxb7JGPiDCuNmk=
github.com/segmentio/kafka-go v0.4.49/go.mod h1:Y1gn60kzLEEaW28YshXyk2+VCUKbJ3Qr6DrnT3i4+9E=
github.com/segmentio/ksuid v1.0.4 h1:sBo2BdShXjmcugAMwjugoGUdUV0pcxY5mW4xKRn3v4c=
@@ -832,18 +852,19 @@ github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPx
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af h1:Sp5TG9f7K39yfB+If0vjp97vuT74F72r8hfRpP8jLU0=
github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/skeema/knownhosts v1.3.0 h1:AM+y0rI04VksttfwjkSTNQorvGqmwATnvnAHpSgc0LY=
github.com/skeema/knownhosts v1.3.0/go.mod h1:sPINvnADmT/qYH1kfv+ePMmOBTH6Tbl7b5LvTDjFK7M=
github.com/spacewander/go-suffix-tree v0.0.0-20191010040751-0865e368c784 h1:0jjO3HdJfOn6gYHD/ZNZh0LLMxEAqkYX7xoDPQReEgs=
github.com/spacewander/go-suffix-tree v0.0.0-20191010040751-0865e368c784/go.mod h1:ff/5myEGgtsAwf26goQCO905GrEm5ugEZSd6OWTsrhM=
github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I=
github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg=
github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo=
github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0=
github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s=
github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0=
github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk=
github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
@@ -894,6 +915,8 @@ github.com/tus/tusd/v2 v2.8.0 h1:X2jGxQ05jAW4inDd2ogmOKqwnb4c/D0lw2yhgHayWyU=
github.com/tus/tusd/v2 v2.8.0/go.mod h1:3/zEOVQQIwmJhvNam8phV4x/UQt68ZmZiTzeuJUNhVo=
github.com/urfave/cli/v2 v2.27.7 h1:bH59vdhbjLv3LAvIu6gd0usJHgoTTPhCFib8qqOwXYU=
github.com/urfave/cli/v2 v2.27.7/go.mod h1:CyNAG/xg+iAOg0N4MPGZqVmv2rCoP267496AOXUZjA4=
github.com/valyala/fastjson v1.6.4 h1:uAUNq9Z6ymTgGhcm0UynUAB6tlbakBrz6CQFax3BXVQ=
github.com/valyala/fastjson v1.6.4/go.mod h1:CLCAqky6SMuOcxStkYQvblddUtoRxhYMGLrsQns1aXY=
github.com/vektah/gqlparser/v2 v2.5.30 h1:EqLwGAFLIzt1wpx1IPpY67DwUujF1OfzgEyDsLrN6kE=
github.com/vektah/gqlparser/v2 v2.5.30/go.mod h1:D1/VCZtV3LPnQrcPBeR/q5jkSQIPti0uYCP/RI0gIeo=
github.com/wk8/go-ordered-map v1.0.0 h1:BV7z+2PaK8LTSd/mWgY12HyMAo5CEgkHqbkVq2thqr8=
@@ -955,8 +978,8 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 h1:GqRJVj7UmLjCVyVJ3ZF
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0/go.mod h1:ri3aaHSmCTVYu2AWv44YMauwAQc0aqI9gHKIcSbI1pU=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 h1:lwI4Dc5leUqENgGuQImwLo4WnuXFPetmPpkLi2IrX54=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0/go.mod h1:Kz/oCE7z5wuyhPxsXDuaPteSWqjSBD5YaSdbxZYGbGk=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.37.0 h1:bDMKF3RUSxshZ5OjOTi8rsHGaPKsAt76FaqgvIUySLc=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.37.0/go.mod h1:dDT67G/IkA46Mr2l9Uj7HsQVwsjASyV9SjGofsiUZDA=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0 h1:aTL7F04bJHUlztTsNGJ2l+6he8c+y/b//eR0jjjemT4=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0/go.mod h1:kldtb7jDTeol0l3ewcmd8SDvx3EmIE7lyvqbasU3QC4=
go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA=
go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI=
go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E=
@@ -1405,7 +1428,7 @@ honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/yaml v1.5.0 h1:M10b2U7aEUY6hRtU870n2VTPgR5RZiL/I6Lcc2F4NUQ=
sigs.k8s.io/yaml v1.5.0/go.mod h1:wZs27Rbxoai4C0f8/9urLZtZtF3avA3gKvGyPdDqTO4=
sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs=
sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4=
stash.kopano.io/kgol/rndm v1.1.2 h1:vriNehb5NuglfGqZPkgeFr2Y5AjXtQCF4vEl4kqc6nc=
stash.kopano.io/kgol/rndm v1.1.2/go.mod h1:CBvpAHlOwyu/XipxfLGk02UN3K3P6hQ8E2JoTbNWfJU=

View File

@@ -0,0 +1,17 @@
ISC License
Copyright (c) 2013-2017 The btcsuite developers
Copyright (c) 2015-2024 The Decred developers
Copyright (c) 2017 The Lightning Network Developers
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

View File

@@ -0,0 +1,72 @@
secp256k1
=========
[![Build Status](https://github.com/decred/dcrd/workflows/Build%20and%20Test/badge.svg)](https://github.com/decred/dcrd/actions)
[![ISC License](https://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org)
[![Doc](https://img.shields.io/badge/doc-reference-blue.svg)](https://pkg.go.dev/github.com/decred/dcrd/dcrec/secp256k1/v4)
Package secp256k1 implements optimized secp256k1 elliptic curve operations.
This package provides an optimized pure Go implementation of elliptic curve
cryptography operations over the secp256k1 curve as well as data structures and
functions for working with public and private secp256k1 keys. See
https://www.secg.org/sec2-v2.pdf for details on the standard.
In addition, sub packages are provided to produce, verify, parse, and serialize
ECDSA signatures and EC-Schnorr-DCRv0 (a custom Schnorr-based signature scheme
specific to Decred) signatures. See the README.md files in the relevant sub
packages for more details about those aspects.
An overview of the features provided by this package are as follows:
- Private key generation, serialization, and parsing
- Public key generation, serialization and parsing per ANSI X9.62-1998
- Parses uncompressed, compressed, and hybrid public keys
- Serializes uncompressed and compressed public keys
- Specialized types for performing optimized and constant time field operations
- `FieldVal` type for working modulo the secp256k1 field prime
- `ModNScalar` type for working modulo the secp256k1 group order
- Elliptic curve operations in Jacobian projective coordinates
- Point addition
- Point doubling
- Scalar multiplication with an arbitrary point
- Scalar multiplication with the base point (group generator)
- Point decompression from a given x coordinate
- Nonce generation via RFC6979 with support for extra data and version
information that can be used to prevent nonce reuse between signing algorithms
It also provides an implementation of the Go standard library `crypto/elliptic`
`Curve` interface via the `S256` function so that it may be used with other
packages in the standard library such as `crypto/tls`, `crypto/x509`, and
`crypto/ecdsa`. However, in the case of ECDSA, it is highly recommended to use
the `ecdsa` sub package of this package instead since it is optimized
specifically for secp256k1 and is significantly faster as a result.
Although this package was primarily written for dcrd, it has intentionally been
designed so it can be used as a standalone package for any projects needing to
use optimized secp256k1 elliptic curve cryptography.
Finally, a comprehensive suite of tests is provided to provide a high level of
quality assurance.
## secp256k1 use in Decred
At the time of this writing, the primary public key cryptography in widespread
use on the Decred network used to secure coins is based on elliptic curves
defined by the secp256k1 domain parameters.
## Installation and Updating
This package is part of the `github.com/decred/dcrd/dcrec/secp256k1/v4` module.
Use the standard go tooling for working with modules to incorporate it.
## Examples
* [Encryption](https://pkg.go.dev/github.com/decred/dcrd/dcrec/secp256k1/v4#example-package-EncryptDecryptMessage)
Demonstrates encrypting and decrypting a message using a shared key derived
through ECDHE.
## License
Package secp256k1 is licensed under the [copyfree](http://copyfree.org) ISC
License.

File diff suppressed because one or more lines are too long

1310
vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/curve.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,14 @@
// Copyright (c) 2024 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
//go:build tinygo
package secp256k1
// This file contains the variants suitable for
// memory or storage constrained environments.
func scalarBaseMultNonConst(k *ModNScalar, result *JacobianPoint) {
scalarBaseMultNonConstSlow(k, result)
}

View File

@@ -0,0 +1,14 @@
// Copyright (c) 2024 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
//go:build !tinygo
package secp256k1
// This file contains the variants that don't fit in
// memory or storage constrained environments.
func scalarBaseMultNonConst(k *ModNScalar, result *JacobianPoint) {
scalarBaseMultNonConstFast(k, result)
}

View File

@@ -0,0 +1,59 @@
// Copyright (c) 2013-2014 The btcsuite developers
// Copyright (c) 2015-2022 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
/*
Package secp256k1 implements optimized secp256k1 elliptic curve operations in
pure Go.
This package provides an optimized pure Go implementation of elliptic curve
cryptography operations over the secp256k1 curve as well as data structures and
functions for working with public and private secp256k1 keys. See
https://www.secg.org/sec2-v2.pdf for details on the standard.
In addition, sub packages are provided to produce, verify, parse, and serialize
ECDSA signatures and EC-Schnorr-DCRv0 (a custom Schnorr-based signature scheme
specific to Decred) signatures. See the README.md files in the relevant sub
packages for more details about those aspects.
An overview of the features provided by this package are as follows:
- Private key generation, serialization, and parsing
- Public key generation, serialization and parsing per ANSI X9.62-1998
- Parses uncompressed, compressed, and hybrid public keys
- Serializes uncompressed and compressed public keys
- Specialized types for performing optimized and constant time field operations
- FieldVal type for working modulo the secp256k1 field prime
- ModNScalar type for working modulo the secp256k1 group order
- Elliptic curve operations in Jacobian projective coordinates
- Point addition
- Point doubling
- Scalar multiplication with an arbitrary point
- Scalar multiplication with the base point (group generator)
- Point decompression from a given x coordinate
- Nonce generation via RFC6979 with support for extra data and version
information that can be used to prevent nonce reuse between signing
algorithms
It also provides an implementation of the Go standard library crypto/elliptic
Curve interface via the S256 function so that it may be used with other packages
in the standard library such as crypto/tls, crypto/x509, and crypto/ecdsa.
However, in the case of ECDSA, it is highly recommended to use the ecdsa sub
package of this package instead since it is optimized specifically for secp256k1
and is significantly faster as a result.
Although this package was primarily written for dcrd, it has intentionally been
designed so it can be used as a standalone package for any projects needing to
use optimized secp256k1 elliptic curve cryptography.
Finally, a comprehensive suite of tests is provided to provide a high level of
quality assurance.
# Use of secp256k1 in Decred
At the time of this writing, the primary public key cryptography in widespread
use on the Decred network used to secure coins is based on elliptic curves
defined by the secp256k1 domain parameters.
*/
package secp256k1

View File

@@ -0,0 +1,21 @@
// Copyright (c) 2015 The btcsuite developers
// Copyright (c) 2015-2023 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package secp256k1
// GenerateSharedSecret generates a shared secret based on a private key and a
// public key using Diffie-Hellman key exchange (ECDH) (RFC 5903).
// RFC5903 Section 9 states we should only return x.
//
// It is recommended to securely hash the result before using as a cryptographic
// key.
func GenerateSharedSecret(privkey *PrivateKey, pubkey *PublicKey) []byte {
var point, result JacobianPoint
pubkey.AsJacobian(&point)
ScalarMultNonConst(&privkey.Key, &point, &result)
result.ToAffine()
xBytes := result.X.Bytes()
return xBytes[:]
}

View File

@@ -0,0 +1,255 @@
// Copyright 2020-2022 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package secp256k1
// References:
// [SECG]: Recommended Elliptic Curve Domain Parameters
// https://www.secg.org/sec2-v2.pdf
//
// [GECC]: Guide to Elliptic Curve Cryptography (Hankerson, Menezes, Vanstone)
import (
"crypto/ecdsa"
"crypto/elliptic"
"math/big"
)
// CurveParams contains the parameters for the secp256k1 curve.
type CurveParams struct {
// P is the prime used in the secp256k1 field.
P *big.Int
// N is the order of the secp256k1 curve group generated by the base point.
N *big.Int
// Gx and Gy are the x and y coordinate of the base point, respectively.
Gx, Gy *big.Int
// BitSize is the size of the underlying secp256k1 field in bits.
BitSize int
// H is the cofactor of the secp256k1 curve.
H int
// ByteSize is simply the bit size / 8 and is provided for convenience
// since it is calculated repeatedly.
ByteSize int
}
// Curve parameters taken from [SECG] section 2.4.1.
var curveParams = CurveParams{
P: fromHex("fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f"),
N: fromHex("fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141"),
Gx: fromHex("79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798"),
Gy: fromHex("483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8"),
BitSize: 256,
H: 1,
ByteSize: 256 / 8,
}
// Params returns the secp256k1 curve parameters for convenience.
func Params() *CurveParams {
return &curveParams
}
// KoblitzCurve provides an implementation for secp256k1 that fits the ECC Curve
// interface from crypto/elliptic.
type KoblitzCurve struct {
*elliptic.CurveParams
}
// bigAffineToJacobian takes an affine point (x, y) as big integers and converts
// it to Jacobian point with Z=1.
func bigAffineToJacobian(x, y *big.Int, result *JacobianPoint) {
result.X.SetByteSlice(x.Bytes())
result.Y.SetByteSlice(y.Bytes())
result.Z.SetInt(1)
}
// jacobianToBigAffine takes a Jacobian point (x, y, z) as field values and
// converts it to an affine point as big integers.
func jacobianToBigAffine(point *JacobianPoint) (*big.Int, *big.Int) {
point.ToAffine()
// Convert the field values for the now affine point to big.Ints.
x3, y3 := new(big.Int), new(big.Int)
x3.SetBytes(point.X.Bytes()[:])
y3.SetBytes(point.Y.Bytes()[:])
return x3, y3
}
// Params returns the parameters for the curve.
//
// This is part of the elliptic.Curve interface implementation.
func (curve *KoblitzCurve) Params() *elliptic.CurveParams {
return curve.CurveParams
}
// IsOnCurve returns whether or not the affine point (x,y) is on the curve.
//
// This is part of the elliptic.Curve interface implementation. This function
// differs from the crypto/elliptic algorithm since a = 0 not -3.
func (curve *KoblitzCurve) IsOnCurve(x, y *big.Int) bool {
// Convert big ints to a Jacobian point for faster arithmetic.
var point JacobianPoint
bigAffineToJacobian(x, y, &point)
return isOnCurve(&point.X, &point.Y)
}
// Add returns the sum of (x1,y1) and (x2,y2).
//
// This is part of the elliptic.Curve interface implementation.
func (curve *KoblitzCurve) Add(x1, y1, x2, y2 *big.Int) (*big.Int, *big.Int) {
// The point at infinity is the identity according to the group law for
// elliptic curve cryptography. Thus, ∞ + P = P and P + ∞ = P.
if x1.Sign() == 0 && y1.Sign() == 0 {
return x2, y2
}
if x2.Sign() == 0 && y2.Sign() == 0 {
return x1, y1
}
// Convert the affine coordinates from big integers to Jacobian points,
// do the point addition in Jacobian projective space, and convert the
// Jacobian point back to affine big.Ints.
var p1, p2, result JacobianPoint
bigAffineToJacobian(x1, y1, &p1)
bigAffineToJacobian(x2, y2, &p2)
AddNonConst(&p1, &p2, &result)
return jacobianToBigAffine(&result)
}
// Double returns 2*(x1,y1).
//
// This is part of the elliptic.Curve interface implementation.
func (curve *KoblitzCurve) Double(x1, y1 *big.Int) (*big.Int, *big.Int) {
if y1.Sign() == 0 {
return new(big.Int), new(big.Int)
}
// Convert the affine coordinates from big integers to Jacobian points,
// do the point doubling in Jacobian projective space, and convert the
// Jacobian point back to affine big.Ints.
var point, result JacobianPoint
bigAffineToJacobian(x1, y1, &point)
DoubleNonConst(&point, &result)
return jacobianToBigAffine(&result)
}
// moduloReduce reduces k from more than 32 bytes to 32 bytes and under. This
// is done by doing a simple modulo curve.N. We can do this since G^N = 1 and
// thus any other valid point on the elliptic curve has the same order.
func moduloReduce(k []byte) []byte {
// Since the order of G is curve.N, we can use a much smaller number by
// doing modulo curve.N
if len(k) > curveParams.ByteSize {
tmpK := new(big.Int).SetBytes(k)
tmpK.Mod(tmpK, curveParams.N)
return tmpK.Bytes()
}
return k
}
// ScalarMult returns k*(bx, by) where k is a big endian integer.
//
// This is part of the elliptic.Curve interface implementation.
func (curve *KoblitzCurve) ScalarMult(bx, by *big.Int, k []byte) (*big.Int, *big.Int) {
// Convert the affine coordinates from big integers to Jacobian points,
// do the multiplication in Jacobian projective space, and convert the
// Jacobian point back to affine big.Ints.
var kModN ModNScalar
kModN.SetByteSlice(moduloReduce(k))
var point, result JacobianPoint
bigAffineToJacobian(bx, by, &point)
ScalarMultNonConst(&kModN, &point, &result)
return jacobianToBigAffine(&result)
}
// ScalarBaseMult returns k*G where G is the base point of the group and k is a
// big endian integer.
//
// This is part of the elliptic.Curve interface implementation.
func (curve *KoblitzCurve) ScalarBaseMult(k []byte) (*big.Int, *big.Int) {
// Perform the multiplication and convert the Jacobian point back to affine
// big.Ints.
var kModN ModNScalar
kModN.SetByteSlice(moduloReduce(k))
var result JacobianPoint
ScalarBaseMultNonConst(&kModN, &result)
return jacobianToBigAffine(&result)
}
// X returns the x coordinate of the public key.
func (p *PublicKey) X() *big.Int {
return new(big.Int).SetBytes(p.x.Bytes()[:])
}
// Y returns the y coordinate of the public key.
func (p *PublicKey) Y() *big.Int {
return new(big.Int).SetBytes(p.y.Bytes()[:])
}
// ToECDSA returns the public key as a *ecdsa.PublicKey.
func (p *PublicKey) ToECDSA() *ecdsa.PublicKey {
return &ecdsa.PublicKey{
Curve: S256(),
X: p.X(),
Y: p.Y(),
}
}
// ToECDSA returns the private key as a *ecdsa.PrivateKey.
func (p *PrivateKey) ToECDSA() *ecdsa.PrivateKey {
	// Serialize the scalar so it can be used for the big integer D. The
	// serialized bytes are zeroed before returning below.
	var serialized [PrivKeyBytesLen]byte
	p.Key.PutBytes(&serialized)

	// Derive the corresponding public key point.
	var pubPoint JacobianPoint
	ScalarBaseMultNonConst(&p.Key, &pubPoint)
	pubX, pubY := jacobianToBigAffine(&pubPoint)

	ecdsaKey := &ecdsa.PrivateKey{
		PublicKey: ecdsa.PublicKey{
			Curve: S256(),
			X:     pubX,
			Y:     pubY,
		},
		D: new(big.Int).SetBytes(serialized[:]),
	}
	zeroArray32(&serialized)
	return ecdsaKey
}
// fromHex converts the passed hex string into a big integer pointer and will
// panic if there is an error. This is only provided for the hard-coded
// constants so errors in the source code can be detected. It will only (and
// must only) be called for initialization purposes.
func fromHex(s string) *big.Int {
	if len(s) == 0 {
		return big.NewInt(0)
	}
	v, ok := new(big.Int).SetString(s, 16)
	if !ok {
		panic("invalid hex in source file: " + s)
	}
	return v
}
// secp256k1 is a global instance of the KoblitzCurve implementation which in
// turn embeds and implements elliptic.CurveParams.
//
// It is the instance returned by S256.
var secp256k1 = &KoblitzCurve{
	CurveParams: &elliptic.CurveParams{
		P: curveParams.P,
		N: curveParams.N,
		// B is the constant term of the secp256k1 curve equation
		// y^2 = x^3 + 7.
		B:       fromHex("0000000000000000000000000000000000000000000000000000000000000007"),
		Gx:      curveParams.Gx,
		Gy:      curveParams.Gy,
		BitSize: curveParams.BitSize,
		Name:    "secp256k1",
	},
}

// S256 returns an elliptic.Curve which implements secp256k1.
func S256() *KoblitzCurve {
	// Return the shared package-level instance so all callers observe the
	// same curve parameters.
	return secp256k1
}

View File

@@ -0,0 +1,67 @@
// Copyright (c) 2020 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package secp256k1
// ErrorKind identifies a kind of error. It has full support for errors.Is and
// errors.As, so the caller can directly check against an error kind when
// determining the reason for an error.
//
// The string value is the constant's own name, which doubles as the text
// returned by its Error method.
type ErrorKind string

// These constants are used to identify a specific Error.
const (
	// ErrPubKeyInvalidLen indicates that the length of a serialized public
	// key is not one of the allowed lengths.
	ErrPubKeyInvalidLen = ErrorKind("ErrPubKeyInvalidLen")

	// ErrPubKeyInvalidFormat indicates an attempt was made to parse a public
	// key that does not specify one of the supported formats.
	ErrPubKeyInvalidFormat = ErrorKind("ErrPubKeyInvalidFormat")

	// ErrPubKeyXTooBig indicates that the x coordinate for a public key
	// is greater than or equal to the prime of the field underlying the group.
	ErrPubKeyXTooBig = ErrorKind("ErrPubKeyXTooBig")

	// ErrPubKeyYTooBig indicates that the y coordinate for a public key is
	// greater than or equal to the prime of the field underlying the group.
	ErrPubKeyYTooBig = ErrorKind("ErrPubKeyYTooBig")

	// ErrPubKeyNotOnCurve indicates that a public key is not a point on the
	// secp256k1 curve.
	ErrPubKeyNotOnCurve = ErrorKind("ErrPubKeyNotOnCurve")

	// ErrPubKeyMismatchedOddness indicates that a hybrid public key specified
	// an oddness of the y coordinate that does not match the actual oddness of
	// the provided y coordinate.
	ErrPubKeyMismatchedOddness = ErrorKind("ErrPubKeyMismatchedOddness")
)
// Error satisfies the error interface and prints human-readable errors.
func (e ErrorKind) Error() string {
	return string(e)
}

// Error identifies an error related to public key cryptography using a
// secp256k1 curve. It has full support for errors.Is and errors.As, so the
// caller can ascertain the specific reason for the error by checking
// the underlying error.
type Error struct {
	// Err is the wrapped error kind, surfaced via Unwrap so errors.Is and
	// errors.As can match against an ErrorKind.
	Err error

	// Description is the human-readable message returned by Error.
	Description string
}

// Error satisfies the error interface and prints human-readable errors.
func (e Error) Error() string {
	return e.Description
}

// Unwrap returns the underlying wrapped error.
func (e Error) Unwrap() error {
	return e.Err
}
// makeError creates an Error given a set of arguments.
func makeError(kind ErrorKind, desc string) Error {
	var e Error
	e.Err = kind
	e.Description = desc
	return e
}

1696
vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/field.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,91 @@
// Copyright 2015 The btcsuite developers
// Copyright (c) 2015-2022 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package secp256k1
import (
"compress/zlib"
"encoding/base64"
"io"
"strings"
"sync"
)
//go:generate go run genprecomps.go
// bytePointTable describes a table used to house pre-computed values for
// accelerating scalar base multiplication.
//
// The table is indexed first by the byte position within a 32-byte scalar and
// then by the 256 possible values of that byte; each entry is a Jacobian
// point loaded in affine form (Z=1).
type bytePointTable [32][256]JacobianPoint

// compressedBytePointsFn is set to a real function by the code generation to
// return the compressed pre-computed values for accelerating scalar base
// multiplication.
//
// It remains nil while the points themselves are being generated, in which
// case no byte points are loaded.
var compressedBytePointsFn func() string
// s256BytePoints houses pre-computed values used to accelerate scalar base
// multiplication such that they are only loaded on first use.
var s256BytePoints = func() func() *bytePointTable {
	// mustLoadBytePoints decompresses and deserializes the pre-computed byte
	// points used to accelerate scalar base multiplication for the secp256k1
	// curve.
	//
	// This approach is used since it allows the compiler to use significantly
	// less ram and be performed much faster than it is with hard-coding the
	// final in-memory data structure. At the same time, it is quite fast to
	// generate the in-memory data structure on first use with this approach
	// versus computing the table.
	//
	// It will panic on any errors because the data is hard coded and thus any
	// errors means something is wrong in the source code.
	var data *bytePointTable
	mustLoadBytePoints := func() {
		// There will be no byte points to load when generating them.
		if compressedBytePointsFn == nil {
			return
		}
		bp := compressedBytePointsFn()

		// Decompress the pre-computed table used to accelerate scalar base
		// multiplication. The data is base64 encoded and then zlib compressed.
		decoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(bp))
		r, err := zlib.NewReader(decoder)
		if err != nil {
			panic(err)
		}
		serialized, err := io.ReadAll(r)
		if err != nil {
			panic(err)
		}

		// Deserialize the precomputed byte points and set the memory table to
		// them. Each point is serialized as two consecutive 32-byte
		// coordinates (X then Y) and stored in affine form (Z=1).
		offset := 0
		var bytePoints bytePointTable
		for byteNum := 0; byteNum < len(bytePoints); byteNum++ {
			// All points in this window.
			for i := 0; i < len(bytePoints[byteNum]); i++ {
				p := &bytePoints[byteNum][i]
				p.X.SetByteSlice(serialized[offset:])
				offset += 32
				p.Y.SetByteSlice(serialized[offset:])
				offset += 32
				p.Z.SetInt(1)
			}
		}
		data = &bytePoints
	}

	// Return a closure that initializes the data on first access. This is done
	// because the table takes a non-trivial amount of memory and initializing
	// it unconditionally would cause anything that imports the package, either
	// directly, or indirectly via transitive deps, to use that memory even if
	// the caller never accesses any parts of the package that actually needs
	// access to it.
	var loadBytePointsOnce sync.Once
	return func() *bytePointTable {
		loadBytePointsOnce.Do(mustLoadBytePoints)
		return data
	}
}()

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,263 @@
// Copyright (c) 2013-2014 The btcsuite developers
// Copyright (c) 2015-2024 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package secp256k1
import (
"bytes"
"crypto/sha256"
"hash"
)
// References:
// [GECC]: Guide to Elliptic Curve Cryptography (Hankerson, Menezes, Vanstone)
//
// [ISO/IEC 8825-1]: Information technology — ASN.1 encoding rules:
// Specification of Basic Encoding Rules (BER), Canonical Encoding Rules
// (CER) and Distinguished Encoding Rules (DER)
//
// [SEC1]: Elliptic Curve Cryptography (May 31, 2009, Version 2.0)
// https://www.secg.org/sec1-v2.pdf
// Shared constant byte sequences used throughout RFC 6979 nonce generation.
// They are allocated once at package initialization to avoid recreating them
// on every call to NonceRFC6979.
var (
	// singleZero is used during RFC6979 nonce generation. It is provided
	// here to avoid the need to create it multiple times.
	singleZero = []byte{0x00}

	// zeroInitializer is used during RFC6979 nonce generation. It is provided
	// here to avoid the need to create it multiple times.
	//
	// Its length is one SHA-256 block (64 bytes) so it can serve both as the
	// initial HMAC key and to clear the HMAC pads.
	zeroInitializer = bytes.Repeat([]byte{0x00}, sha256.BlockSize)

	// singleOne is used during RFC6979 nonce generation. It is provided
	// here to avoid the need to create it multiple times.
	singleOne = []byte{0x01}

	// oneInitializer is used during RFC6979 nonce generation. It is provided
	// here to avoid the need to create it multiple times.
	//
	// Its length is one SHA-256 digest (32 bytes) and it serves as the
	// initial V value per step B of the RFC.
	oneInitializer = bytes.Repeat([]byte{0x01}, sha256.Size)
)
// hmacsha256 implements a resettable version of HMAC-SHA256.
//
// Unlike the standard library's crypto/hmac, this implementation allows the
// key to be swapped in place via ResetKey, avoiding a fresh allocation per
// key during RFC 6979 nonce generation.
type hmacsha256 struct {
	// inner and outer are the two SHA-256 instances of the standard HMAC
	// construction: H((key ^ opad) || H((key ^ ipad) || message)).
	inner, outer hash.Hash

	// ipad and opad hold the key XORed with the inner (0x36) and outer
	// (0x5c) padding constants, respectively.
	ipad, opad [sha256.BlockSize]byte
}

// Write adds data to the running hash.
func (h *hmacsha256) Write(p []byte) {
	h.inner.Write(p)
}

// initKey initializes the HMAC-SHA256 instance to the provided key.
//
// It assumes ipad and opad are currently all zero (as they are on a freshly
// allocated instance or after ResetKey clears them), since any key shorter
// than the block size relies on the remaining bytes being zero.
func (h *hmacsha256) initKey(key []byte) {
	// Hash the key if it is too large.
	//
	// Note that outer need not be reset first here because Sum resets it
	// before producing a digest.
	if len(key) > sha256.BlockSize {
		h.outer.Write(key)
		key = h.outer.Sum(nil)
	}
	copy(h.ipad[:], key)
	copy(h.opad[:], key)
	for i := range h.ipad {
		h.ipad[i] ^= 0x36
	}
	for i := range h.opad {
		h.opad[i] ^= 0x5c
	}
	h.inner.Write(h.ipad[:])
}

// ResetKey resets the HMAC-SHA256 to its initial state and then initializes it
// with the provided key. It is equivalent to creating a new instance with the
// provided key without allocating more memory.
func (h *hmacsha256) ResetKey(key []byte) {
	h.inner.Reset()
	h.outer.Reset()
	// Clear the pads so initKey starts from the all-zero state it expects.
	copy(h.ipad[:], zeroInitializer)
	copy(h.opad[:], zeroInitializer)
	h.initKey(key)
}

// Resets the HMAC-SHA256 to its initial state using the current key.
func (h *hmacsha256) Reset() {
	h.inner.Reset()
	h.inner.Write(h.ipad[:])
}

// Sum returns the hash of the written data.
//
// Note that inner is intentionally not reset here; call Reset to start a new
// message with the same key.
func (h *hmacsha256) Sum() []byte {
	h.outer.Reset()
	h.outer.Write(h.opad[:])
	h.outer.Write(h.inner.Sum(nil))
	return h.outer.Sum(nil)
}
// newHMACSHA256 returns a new HMAC-SHA256 hasher using the provided key.
func newHMACSHA256(key []byte) *hmacsha256 {
	hasher := &hmacsha256{
		inner: sha256.New(),
		outer: sha256.New(),
	}
	hasher.initKey(key)
	return hasher
}
// NonceRFC6979 generates a nonce deterministically according to RFC 6979 using
// HMAC-SHA256 for the hashing function. It takes a 32-byte hash as an input
// and returns a 32-byte nonce to be used for deterministic signing. The extra
// and version arguments are optional, but allow additional data to be added to
// the input of the HMAC. When provided, the extra data must be 32-bytes and
// version must be 16 bytes or they will be ignored.
//
// Finally, the extraIterations parameter provides a method to produce a stream
// of deterministic nonces to ensure the signing code is able to produce a nonce
// that results in a valid signature in the extremely unlikely event the
// original nonce produced results in an invalid signature (e.g. R == 0).
// Signing code should start with 0 and increment it if necessary.
func NonceRFC6979(privKey []byte, hash []byte, extra []byte, version []byte, extraIterations uint32) *ModNScalar {
	// Input to HMAC is the 32-byte private key and the 32-byte hash. In
	// addition, it may include the optional 32-byte extra data and 16-byte
	// version. Create a fixed-size array to avoid extra allocs and slice it
	// properly.
	const (
		privKeyLen = 32
		hashLen    = 32
		extraLen   = 32
		versionLen = 16
	)
	var keyBuf [privKeyLen + hashLen + extraLen + versionLen]byte

	// Truncate rightmost bytes of private key and hash if they are too long and
	// leave left padding of zeros when they're too short.
	if len(privKey) > privKeyLen {
		privKey = privKey[:privKeyLen]
	}
	if len(hash) > hashLen {
		hash = hash[:hashLen]
	}
	offset := privKeyLen - len(privKey) // Zero left padding if needed.
	offset += copy(keyBuf[offset:], privKey)
	offset += hashLen - len(hash) // Zero left padding if needed.
	offset += copy(keyBuf[offset:], hash)
	if len(extra) == extraLen {
		offset += copy(keyBuf[offset:], extra)
		if len(version) == versionLen {
			offset += copy(keyBuf[offset:], version)
		}
	} else if len(version) == versionLen {
		// When the version was specified, but not the extra data, leave the
		// extra data portion all zero.
		//
		// Note privKeyLen is used to skip over the extra data portion here;
		// this relies on privKeyLen and extraLen both being 32.
		offset += privKeyLen
		offset += copy(keyBuf[offset:], version)
	}
	key := keyBuf[:offset]

	// Step B.
	//
	// V = 0x01 0x01 0x01 ... 0x01 such that the length of V, in bits, is
	// equal to 8*ceil(hashLen/8).
	//
	// Note that since the hash length is a multiple of 8 for the chosen hash
	// function in this optimized implementation, the result is just the hash
	// length, so avoid the extra calculations. Also, since it isn't modified,
	// start with a global value.
	v := oneInitializer

	// Step C (Go zeroes all allocated memory).
	//
	// K = 0x00 0x00 0x00 ... 0x00 such that the length of K, in bits, is
	// equal to 8*ceil(hashLen/8).
	//
	// As above, since the hash length is a multiple of 8 for the chosen hash
	// function in this optimized implementation, the result is just the hash
	// length, so avoid the extra calculations.
	k := zeroInitializer[:hashLen]

	// Step D.
	//
	// K = HMAC_K(V || 0x00 || int2octets(x) || bits2octets(h1))
	//
	// Note that key is the "int2octets(x) || bits2octets(h1)" portion along
	// with potential additional data as described by section 3.6 of the RFC.
	hasher := newHMACSHA256(k)
	hasher.Write(oneInitializer)
	hasher.Write(singleZero)
	hasher.Write(key)
	k = hasher.Sum()

	// Step E.
	//
	// V = HMAC_K(V)
	hasher.ResetKey(k)
	hasher.Write(v)
	v = hasher.Sum()

	// Step F.
	//
	// K = HMAC_K(V || 0x01 || int2octets(x) || bits2octets(h1))
	//
	// Note that key is the "int2octets(x) || bits2octets(h1)" portion along
	// with potential additional data as described by section 3.6 of the RFC.
	hasher.Reset()
	hasher.Write(v)
	hasher.Write(singleOne)
	hasher.Write(key)
	k = hasher.Sum()

	// Step G.
	//
	// V = HMAC_K(V)
	hasher.ResetKey(k)
	hasher.Write(v)
	v = hasher.Sum()

	// Step H.
	//
	// Repeat until the value is nonzero and less than the curve order.
	var generated uint32
	for {
		// Step H1 and H2.
		//
		// Set T to the empty sequence. The length of T (in bits) is denoted
		// tlen; thus, at that point, tlen = 0.
		//
		// While tlen < qlen, do the following:
		//   V = HMAC_K(V)
		//   T = T || V
		//
		// Note that because the hash function output is the same length as the
		// private key in this optimized implementation, there is no need to
		// loop or create an intermediate T.
		hasher.Reset()
		hasher.Write(v)
		v = hasher.Sum()

		// Step H3.
		//
		// k = bits2int(T)
		// If k is within the range [1,q-1], return it.
		//
		// Otherwise, compute:
		// K = HMAC_K(V || 0x00)
		// V = HMAC_K(V)
		//
		// Valid candidates are counted so the first extraIterations valid
		// nonces can be skipped per the extraIterations parameter contract.
		var secret ModNScalar
		overflow := secret.SetByteSlice(v)
		if !overflow && !secret.IsZero() {
			generated++
			if generated > extraIterations {
				return &secret
			}
		}

		// K = HMAC_K(V || 0x00)
		hasher.Reset()
		hasher.Write(v)
		hasher.Write(singleZero)
		k = hasher.Sum()

		// V = HMAC_K(V)
		hasher.ResetKey(k)
		hasher.Write(v)
		v = hasher.Sum()
	}
}

View File

@@ -0,0 +1,111 @@
// Copyright (c) 2013-2014 The btcsuite developers
// Copyright (c) 2015-2024 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package secp256k1
import (
cryptorand "crypto/rand"
"io"
)
// PrivateKey provides facilities for working with secp256k1 private keys within
// this package and includes functionality such as serializing and parsing them
// as well as computing their associated public key.
type PrivateKey struct {
	// Key is the private key scalar in the range [0, N-1], where N is the
	// order of the curve.
	Key ModNScalar
}
// NewPrivateKey instantiates a new private key from a scalar encoded as a
// big integer.
func NewPrivateKey(key *ModNScalar) *PrivateKey {
	privKey := new(PrivateKey)
	privKey.Key = *key
	return privKey
}

// PrivKeyFromBytes returns a private based on the provided byte slice which is
// interpreted as an unsigned 256-bit big-endian integer in the range [0, N-1],
// where N is the order of the curve.
//
// WARNING: This means passing a slice with more than 32 bytes is truncated and
// that truncated value is reduced modulo N. Further, 0 is not a valid private
// key. It is up to the caller to provide a value in the appropriate range of
// [1, N-1]. Failure to do so will either result in an invalid private key or
// potentially weak private keys that have bias that could be exploited.
//
// This function primarily exists to provide a mechanism for converting
// serialized private keys that are already known to be good.
//
// Typically callers should make use of GeneratePrivateKey or
// GeneratePrivateKeyFromRand when creating private keys since they properly
// handle generation of appropriate values.
func PrivKeyFromBytes(privKeyBytes []byte) *PrivateKey {
	privKey := new(PrivateKey)
	privKey.Key.SetByteSlice(privKeyBytes)
	return privKey
}
// generatePrivateKey generates and returns a new private key that is suitable
// for use with secp256k1 using the provided reader as a source of entropy. The
// provided reader must be a source of cryptographically secure randomness to
// avoid weak private keys.
func generatePrivateKey(rand io.Reader) (*PrivateKey, error) {
	// The group order is close enough to 2^256 that there is only roughly a 1
	// in 2^128 chance of generating an invalid private key, so this loop will
	// virtually never run more than a single iteration in practice.
	var key PrivateKey
	var b32 [32]byte
	for valid := false; !valid; {
		if _, err := io.ReadFull(rand, b32[:]); err != nil {
			return nil, err
		}

		// The private key is only valid when it is in the range [1, N-1], where
		// N is the order of the curve.
		//
		// The zero and overflow bits are combined with a bitwise OR, which
		// appears intended to avoid branching on the candidate key material.
		overflow := key.Key.SetBytes(&b32)
		valid = (key.Key.IsZeroBit() | overflow) == 0
	}
	// Clear the raw candidate bytes before returning so the key material only
	// lives in the scalar.
	zeroArray32(&b32)

	return &key, nil
}
// GeneratePrivateKey generates and returns a new cryptographically secure
// private key that is suitable for use with secp256k1.
//
// It is a convenience wrapper around generatePrivateKey using crypto/rand as
// the entropy source.
func GeneratePrivateKey() (*PrivateKey, error) {
	return generatePrivateKey(cryptorand.Reader)
}

// GeneratePrivateKeyFromRand generates a private key that is suitable for use
// with secp256k1 using the provided reader as a source of entropy. The
// provided reader must be a source of cryptographically secure randomness, such
// as [crypto/rand.Reader], to avoid weak private keys.
func GeneratePrivateKeyFromRand(rand io.Reader) (*PrivateKey, error) {
	return generatePrivateKey(rand)
}
// PubKey computes and returns the public key corresponding to this private key.
func (p *PrivateKey) PubKey() *PublicKey {
	// Compute k*G in Jacobian projective space, normalize back to affine
	// coordinates, and wrap the result in a PublicKey.
	var point JacobianPoint
	ScalarBaseMultNonConst(&p.Key, &point)
	point.ToAffine()
	return NewPublicKey(&point.X, &point.Y)
}
// Zero manually clears the memory associated with the private key. This can be
// used to explicitly clear key material from memory for enhanced security
// against memory scraping.
//
// Callers are responsible for not using the private key after calling this.
func (p *PrivateKey) Zero() {
	p.Key.Zero()
}

// PrivKeyBytesLen defines the length in bytes of a serialized private key.
const PrivKeyBytesLen = 32
// Serialize returns the private key as a 256-bit big-endian binary-encoded
// number, padded to a length of 32 bytes.
func (p PrivateKey) Serialize() []byte {
	var buf [PrivKeyBytesLen]byte
	p.Key.PutBytes(&buf)
	return buf[:]
}

View File

@@ -0,0 +1,236 @@
// Copyright (c) 2013-2014 The btcsuite developers
// Copyright (c) 2015-2024 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package secp256k1
// References:
// [SEC1] Elliptic Curve Cryptography
// https://www.secg.org/sec1-v2.pdf
//
// [SEC2] Recommended Elliptic Curve Domain Parameters
// https://www.secg.org/sec2-v2.pdf
//
// [ANSI X9.62-1998] Public Key Cryptography For The Financial Services
// Industry: The Elliptic Curve Digital Signature Algorithm (ECDSA)
import (
"fmt"
)
const (
	// PubKeyBytesLenCompressed is the number of bytes of a serialized
	// compressed public key.
	PubKeyBytesLenCompressed = 33

	// PubKeyBytesLenUncompressed is the number of bytes of a serialized
	// uncompressed public key.
	PubKeyBytesLenUncompressed = 65

	// PubKeyFormatCompressedEven is the identifier prefix byte for a public key
	// whose Y coordinate is even when serialized in the compressed format per
	// section 2.3.4 of [SEC1](https://secg.org/sec1-v2.pdf#subsubsection.2.3.4).
	PubKeyFormatCompressedEven byte = 0x02

	// PubKeyFormatCompressedOdd is the identifier prefix byte for a public key
	// whose Y coordinate is odd when serialized in the compressed format per
	// section 2.3.4 of [SEC1](https://secg.org/sec1-v2.pdf#subsubsection.2.3.4).
	PubKeyFormatCompressedOdd byte = 0x03

	// PubKeyFormatUncompressed is the identifier prefix byte for a public key
	// when serialized in the uncompressed format per section 2.3.3 of
	// [SEC1](https://secg.org/sec1-v2.pdf#subsubsection.2.3.3).
	PubKeyFormatUncompressed byte = 0x04

	// PubKeyFormatHybridEven is the identifier prefix byte for a public key
	// whose Y coordinate is even when serialized according to the hybrid format
	// per section 4.3.6 of [ANSI X9.62-1998].
	//
	// NOTE: This format makes little sense in practice and therefore this
	// package will not produce public keys serialized in this format. However,
	// it will parse them since they exist in the wild.
	PubKeyFormatHybridEven byte = 0x06

	// PubKeyFormatHybridOdd is the identifier prefix byte for a public key
	// whose Y coordinate is odd when serialized according to the hybrid format
	// per section 4.3.6 of [ANSI X9.62-1998].
	//
	// NOTE: This format makes little sense in practice and therefore this
	// package will not produce public keys serialized in this format. However,
	// it will parse them since they exist in the wild.
	PubKeyFormatHybridOdd byte = 0x07
)

// PublicKey provides facilities for efficiently working with secp256k1 public
// keys within this package and includes functions to serialize in both
// uncompressed and compressed SEC (Standards for Efficient Cryptography)
// formats.
type PublicKey struct {
	// x and y are the affine coordinates of the public key point.
	x FieldVal
	y FieldVal
}
// NewPublicKey instantiates a new public key with the given x and y
// coordinates.
//
// It should be noted that, unlike ParsePubKey, since this accepts arbitrary x
// and y coordinates, it allows creation of public keys that are not valid
// points on the secp256k1 curve. The IsOnCurve method of the returned instance
// can be used to determine validity.
func NewPublicKey(x, y *FieldVal) *PublicKey {
	pubKey := new(PublicKey)
	pubKey.x.Set(x)
	pubKey.y.Set(y)
	return pubKey
}
// ParsePubKey parses a secp256k1 public key encoded according to the format
// specified by ANSI X9.62-1998, which means it is also compatible with the
// SEC (Standards for Efficient Cryptography) specification which is a subset of
// the former. In other words, it supports the uncompressed, compressed, and
// hybrid formats as follows:
//
// Compressed:
//
//	<format byte = 0x02/0x03><32-byte X coordinate>
//
// Uncompressed:
//
//	<format byte = 0x04><32-byte X coordinate><32-byte Y coordinate>
//
// Hybrid:
//
//	<format byte = 0x06/0x07><32-byte X coordinate><32-byte Y coordinate>
//
// NOTE: The hybrid format makes little sense in practice and therefore this
// package will not produce public keys serialized in this format. However,
// this function will properly parse them since they exist in the wild.
func ParsePubKey(serialized []byte) (key *PublicKey, err error) {
	var x, y FieldVal
	// The serialized length alone determines which formats are possible.
	switch len(serialized) {
	case PubKeyBytesLenUncompressed:
		// Reject unsupported public key formats for the given length.
		format := serialized[0]
		switch format {
		case PubKeyFormatUncompressed:
		case PubKeyFormatHybridEven, PubKeyFormatHybridOdd:
		default:
			str := fmt.Sprintf("invalid public key: unsupported format: %x",
				format)
			return nil, makeError(ErrPubKeyInvalidFormat, str)
		}

		// Parse the x and y coordinates while ensuring that they are in the
		// allowed range.
		if overflow := x.SetByteSlice(serialized[1:33]); overflow {
			str := "invalid public key: x >= field prime"
			return nil, makeError(ErrPubKeyXTooBig, str)
		}
		if overflow := y.SetByteSlice(serialized[33:]); overflow {
			str := "invalid public key: y >= field prime"
			return nil, makeError(ErrPubKeyYTooBig, str)
		}

		// Ensure the oddness of the y coordinate matches the specified format
		// for hybrid public keys.
		if format == PubKeyFormatHybridEven || format == PubKeyFormatHybridOdd {
			wantOddY := format == PubKeyFormatHybridOdd
			if y.IsOdd() != wantOddY {
				str := fmt.Sprintf("invalid public key: y oddness does not "+
					"match specified value of %v", wantOddY)
				return nil, makeError(ErrPubKeyMismatchedOddness, str)
			}
		}

		// Reject public keys that are not on the secp256k1 curve.
		if !isOnCurve(&x, &y) {
			str := fmt.Sprintf("invalid public key: [%v,%v] not on secp256k1 "+
				"curve", x, y)
			return nil, makeError(ErrPubKeyNotOnCurve, str)
		}

	case PubKeyBytesLenCompressed:
		// Reject unsupported public key formats for the given length.
		format := serialized[0]
		switch format {
		case PubKeyFormatCompressedEven, PubKeyFormatCompressedOdd:
		default:
			str := fmt.Sprintf("invalid public key: unsupported format: %x",
				format)
			return nil, makeError(ErrPubKeyInvalidFormat, str)
		}

		// Parse the x coordinate while ensuring that it is in the allowed
		// range.
		if overflow := x.SetByteSlice(serialized[1:33]); overflow {
			str := "invalid public key: x >= field prime"
			return nil, makeError(ErrPubKeyXTooBig, str)
		}

		// Attempt to calculate the y coordinate for the given x coordinate such
		// that the result pair is a point on the secp256k1 curve and the
		// solution with desired oddness is chosen.
		wantOddY := format == PubKeyFormatCompressedOdd
		if !DecompressY(&x, wantOddY, &y) {
			str := fmt.Sprintf("invalid public key: x coordinate %v is not on "+
				"the secp256k1 curve", x)
			return nil, makeError(ErrPubKeyNotOnCurve, str)
		}

	default:
		str := fmt.Sprintf("malformed public key: invalid length: %d",
			len(serialized))
		return nil, makeError(ErrPubKeyInvalidLen, str)
	}

	return NewPublicKey(&x, &y), nil
}
// SerializeUncompressed serializes a public key in the 65-byte uncompressed
// format.
func (p PublicKey) SerializeUncompressed() []byte {
	// 0x04 || 32-byte x coordinate || 32-byte y coordinate
	serialized := make([]byte, PubKeyBytesLenUncompressed)
	serialized[0] = PubKeyFormatUncompressed
	p.x.PutBytesUnchecked(serialized[1:33])
	p.y.PutBytesUnchecked(serialized[33:65])
	return serialized
}
// SerializeCompressed serializes a public key in the 33-byte compressed format.
func (p PublicKey) SerializeCompressed() []byte {
	// 0x02 or 0x03 (depending on the oddness of the Y coordinate) followed by
	// the 32-byte x coordinate.
	serialized := make([]byte, PubKeyBytesLenCompressed)
	if p.y.IsOdd() {
		serialized[0] = PubKeyFormatCompressedOdd
	} else {
		serialized[0] = PubKeyFormatCompressedEven
	}
	p.x.PutBytesUnchecked(serialized[1:33])
	return serialized
}
// IsEqual compares this public key instance to the one passed, returning true
// if both public keys are equivalent. A public key is equivalent to another,
// if they both have the same X and Y coordinates.
func (p *PublicKey) IsEqual(otherPubKey *PublicKey) bool {
	if !p.x.Equals(&otherPubKey.x) {
		return false
	}
	return p.y.Equals(&otherPubKey.y)
}
// AsJacobian converts the public key into a Jacobian point with Z=1 and stores
// the result in the provided result param. This allows the public key to be
// treated as a Jacobian point in the secp256k1 group in calculations.
func (p *PublicKey) AsJacobian(result *JacobianPoint) {
	result.X.Set(&p.x)
	result.Y.Set(&p.y)
	// Z=1 means the Jacobian coordinates are the same as affine coordinates.
	result.Z.SetInt(1)
}

// IsOnCurve returns whether or not the public key represents a point on the
// secp256k1 curve.
func (p *PublicKey) IsOnCurve() bool {
	return isOnCurve(&p.x, &p.y)
}

15
vendor/github.com/lestrrat-go/blackmagic/.gitignore generated vendored Normal file
View File

@@ -0,0 +1,15 @@
# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib
# Test binary, built with `go test -c`
*.test
# Output of the go coverage tool, specifically when used with LiteIDE
*.out
# Dependency directories (remove the comment below to include it)
# vendor/

21
vendor/github.com/lestrrat-go/blackmagic/LICENSE generated vendored Normal file
View File

@@ -0,0 +1,21 @@
MIT License
Copyright (c) 2021 lestrrat-go
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

3
vendor/github.com/lestrrat-go/blackmagic/README.md generated vendored Normal file
View File

@@ -0,0 +1,3 @@
# blackmagic
Reflect-based black magic. YMMV, and use with caution

125
vendor/github.com/lestrrat-go/blackmagic/blackmagic.go generated vendored Normal file
View File

@@ -0,0 +1,125 @@
package blackmagic
import (
"fmt"
"reflect"
)
// errInvalidValue is the concrete error type returned by InvalidValueError.
type errInvalidValue struct{}

// Error satisfies the error interface.
func (*errInvalidValue) Error() string {
	return "invalid value (probably an untyped nil)"
}

// InvalidValueError is a sentinel error that can be used to
// indicate that a value is invalid. This can happen when the
// source value is an untyped nil, and we have no further information
// about the type of the value, obstructing the assignment.
//
// NOTE(review): each call allocates a new pointer to a zero-size struct;
// identity comparison via errors.Is across calls relies on the runtime
// placing all zero-size allocations at the same address — confirm before
// depending on pointer equality of separate calls.
func InvalidValueError() error {
	return &errInvalidValue{}
}
// AssignOptionalField is a convenience function to assign a value to
// an optional struct field. In Go, an optional struct field is
// usually denoted by a pointer to T instead of T:
//
//	type Object struct {
//	  Optional *T
//	}
//
// This gets a bit cumbersome when you want to assign literals
// or you do not want to worry about taking the address of a
// variable.
//
//	Object.Optional = &"foo" // doesn't compile!
//
// Instead you can use this function to do it in one line:
//
//	blackmagic.AssignOptionalField(&Object.Optional, "foo")
func AssignOptionalField(dst, src interface{}) error {
	dstValue := reflect.ValueOf(dst)
	srcValue := reflect.ValueOf(src)

	// dst must be a pointer to a pointer field (**T) so the inner pointer
	// can be replaced.
	if dstValue.Kind() != reflect.Pointer || dstValue.Elem().Kind() != reflect.Pointer {
		return fmt.Errorf(`dst must be a pointer to a field that is turn a pointer of src (%T)`, src)
	}

	field := dstValue.Elem()
	if !field.CanSet() {
		return fmt.Errorf(`dst (%T) is not assignable`, field.Interface())
	}
	if !reflect.PointerTo(srcValue.Type()).AssignableTo(field.Type()) {
		return fmt.Errorf(`cannot assign src (%T) to dst (%T)`, src, dst)
	}

	// Allocate a fresh *T, copy src into it, and store it in the field.
	box := reflect.New(srcValue.Type())
	box.Elem().Set(srcValue)
	field.Set(box)
	return nil
}
// AssignIfCompatible is a convenience function to safely
// assign arbitrary values. dst must be a pointer to an
// empty interface, or it must be a pointer to a compatible
// variable type that can hold src.
func AssignIfCompatible(dst, src interface{}) error {
	orv := reflect.ValueOf(src) // save this value for error reporting
	result := orv

	// src can be a pointer or a slice, and the code will slightly change
	// depending on this
	var srcIsPtr bool
	var srcIsSlice bool
	switch result.Kind() {
	case reflect.Ptr:
		srcIsPtr = true
	case reflect.Slice:
		srcIsSlice = true
	}

	rv := reflect.ValueOf(dst)
	if rv.Kind() != reflect.Ptr {
		return fmt.Errorf(`destination argument to AssignIfCompatible() must be a pointer: %T`, dst)
	}

	// Walk down through the destination until a settable value is found,
	// dereferencing pointers along the way.
	actualDst := rv
	for {
		if !actualDst.IsValid() {
			return fmt.Errorf(`could not find a valid destination for AssignIfCompatible() (%T)`, dst)
		}
		if actualDst.CanSet() {
			break
		}
		actualDst = actualDst.Elem()
	}

	switch actualDst.Kind() {
	case reflect.Interface:
		// If it's an interface, we can just assign the pointer to the interface{}
	default:
		// If it's a pointer to the struct we're looking for, we need to set
		// the de-referenced struct
		if !srcIsSlice && srcIsPtr {
			result = result.Elem()
		}
	}
	if !result.IsValid() {
		// At this point there's nothing we can do. return an error
		return fmt.Errorf(`source value is invalid (%T): %w`, src, InvalidValueError())
	}

	// When the destination itself is a pointer, store the address of the
	// (possibly dereferenced) source value.
	if actualDst.Kind() == reflect.Ptr {
		actualDst.Set(result.Addr())
		return nil
	}

	if !result.Type().AssignableTo(actualDst.Type()) {
		return fmt.Errorf(`argument to AssignIfCompatible() must be compatible with %T (was %T)`, orv.Interface(), dst)
	}

	if !actualDst.CanSet() {
		return fmt.Errorf(`argument to AssignIfCompatible() must be settable`)
	}
	actualDst.Set(result)

	return nil
}

View File

@@ -0,0 +1,32 @@
# If you prefer the allow list template instead of the deny list, see community template:
# https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore
#
# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib
# Test binary, built with `go test -c`
*.test
# Code coverage profiles and other test artifacts
*.out
coverage.*
*.coverprofile
profile.cov
# Dependency directories (remove the comment below to include it)
# vendor/
# Go workspace file
go.work
go.work.sum
# env file
.env
# Editor/IDE
# .idea/
# .vscode/

5
vendor/github.com/lestrrat-go/dsig-secp256k1/Changes generated vendored Normal file
View File

@@ -0,0 +1,5 @@
Changes
=======
v1.0.0 18 Aug 2025
* Initial release

21
vendor/github.com/lestrrat-go/dsig-secp256k1/LICENSE generated vendored Normal file
View File

@@ -0,0 +1,21 @@
MIT License
Copyright (c) 2025 lestrrat-go
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@@ -0,0 +1,29 @@
// Package dsigsecp256k1 wires secp256k1 (ES256K-style) support into the
// github.com/lestrrat-go/dsig algorithm registry.
package dsigsecp256k1

import (
	"crypto"

	"github.com/decred/dcrd/dcrec/secp256k1/v4"
	"github.com/lestrrat-go/dsig"
)

// ECDSAWithSecp256k1AndSHA256 is the dsig algorithm name registered for
// ECDSA over secp256k1 with SHA-256 (commonly known as ES256K).
const ECDSAWithSecp256k1AndSHA256 = "ECDSA_WITH_SECP256K1_AND_SHA256"

// init registers secp256k1 + SHA-256 with dsig's ECDSA family at import
// time. NOTE(review): an earlier comment mentioned a dsig_secp256k1 build
// tag, but no build constraint is visible in this file — confirm whether
// one applies.
func init() {
	err := dsig.RegisterAlgorithm(ECDSAWithSecp256k1AndSHA256, dsig.AlgorithmInfo{
		Family: dsig.ECDSA,
		Meta: dsig.ECDSAFamilyMeta{
			Hash: crypto.SHA256,
		},
	})
	if err != nil {
		// Registration can only fail on mismatched metadata, which would be
		// a programming bug here, so panic.
		panic("failed to register secp256k1 algorithm: " + err.Error())
	}
}

// Curve returns the secp256k1 (Koblitz) curve implementation.
func Curve() *secp256k1.KoblitzCurve {
	return secp256k1.S256()
}

32
vendor/github.com/lestrrat-go/dsig/.gitignore generated vendored Normal file
View File

@@ -0,0 +1,32 @@
# If you prefer the allow list template instead of the deny list, see community template:
# https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore
#
# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib
# Test binary, built with `go test -c`
*.test
# Code coverage profiles and other test artifacts
*.out
coverage.*
*.coverprofile
profile.cov
# Dependency directories (remove the comment below to include it)
# vendor/
# Go workspace file
go.work
go.work.sum
# env file
.env
# Editor/IDE
# .idea/
# .vscode/

5
vendor/github.com/lestrrat-go/dsig/Changes generated vendored Normal file
View File

@@ -0,0 +1,5 @@
Changes
=======
v1.0.0 - 18 Aug 2025
* Initial release

21
vendor/github.com/lestrrat-go/dsig/LICENSE generated vendored Normal file
View File

@@ -0,0 +1,21 @@
MIT License
Copyright (c) 2025 lestrrat-go
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

163
vendor/github.com/lestrrat-go/dsig/README.md generated vendored Normal file
View File

@@ -0,0 +1,163 @@
# github.com/lestrrat-go/dsig [![CI](https://github.com/lestrrat-go/dsig/actions/workflows/ci.yml/badge.svg)](https://github.com/lestrrat-go/dsig/actions/workflows/ci.yml) [![Go Reference](https://pkg.go.dev/badge/github.com/lestrrat-go/dsig.svg)](https://pkg.go.dev/github.com/lestrrat-go/dsig) [![codecov.io](https://codecov.io/github/lestrrat-go/dsig/coverage.svg?branch=v1)](https://codecov.io/github/lestrrat-go/dsig?branch=v1)
Go module providing low-level digital signature operations.
While there are many standards for generating and verifying digital signatures, the core operations are virtually the same. This module implements the core functionality of digital signature generation / verification in a framework-agnostic way.
# Features
* RSA signatures (PKCS1v15 and PSS)
* ECDSA signatures (P-256, P-384, P-521)
* EdDSA signatures (Ed25519, Ed448)
* HMAC signatures (SHA-256, SHA-384, SHA-512)
* Support for crypto.Signer interface
* Allows for dynamic additions of algorithms in limited cases.
# SYNOPSIS
<!-- INCLUDE(examples/dsig_readme_example_test.go) -->
```go
package examples_test
import (
"crypto/ecdsa"
"crypto/ed25519"
"crypto/elliptic"
"crypto/rand"
"crypto/rsa"
"fmt"
"github.com/lestrrat-go/dsig"
)
func Example() {
payload := []byte("hello world")
// RSA signing and verification
{
privKey, err := rsa.GenerateKey(rand.Reader, 2048)
if err != nil {
fmt.Printf("failed to generate RSA key: %s\n", err)
return
}
// Sign with RSA-PSS SHA256
signature, err := dsig.Sign(privKey, dsig.RSAPSSWithSHA256, payload, nil)
if err != nil {
fmt.Printf("failed to sign with RSA: %s\n", err)
return
}
// Verify with RSA-PSS SHA256
err = dsig.Verify(&privKey.PublicKey, dsig.RSAPSSWithSHA256, payload, signature)
if err != nil {
fmt.Printf("failed to verify RSA signature: %s\n", err)
return
}
}
// ECDSA signing and verification
{
privKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
if err != nil {
fmt.Printf("failed to generate ECDSA key: %s\n", err)
return
}
// Sign with ECDSA P-256 SHA256
signature, err := dsig.Sign(privKey, dsig.ECDSAWithP256AndSHA256, payload, nil)
if err != nil {
fmt.Printf("failed to sign with ECDSA: %s\n", err)
return
}
// Verify with ECDSA P-256 SHA256
err = dsig.Verify(&privKey.PublicKey, dsig.ECDSAWithP256AndSHA256, payload, signature)
if err != nil {
fmt.Printf("failed to verify ECDSA signature: %s\n", err)
return
}
}
// EdDSA signing and verification
{
pubKey, privKey, err := ed25519.GenerateKey(rand.Reader)
if err != nil {
fmt.Printf("failed to generate Ed25519 key: %s\n", err)
return
}
// Sign with EdDSA
signature, err := dsig.Sign(privKey, dsig.EdDSA, payload, nil)
if err != nil {
fmt.Printf("failed to sign with EdDSA: %s\n", err)
return
}
// Verify with EdDSA
err = dsig.Verify(pubKey, dsig.EdDSA, payload, signature)
if err != nil {
fmt.Printf("failed to verify EdDSA signature: %s\n", err)
return
}
}
// HMAC signing and verification
{
key := []byte("secret-key")
// Sign with HMAC SHA256
signature, err := dsig.Sign(key, dsig.HMACWithSHA256, payload, nil)
if err != nil {
fmt.Printf("failed to sign with HMAC: %s\n", err)
return
}
// Verify with HMAC SHA256
err = dsig.Verify(key, dsig.HMACWithSHA256, payload, signature)
if err != nil {
fmt.Printf("failed to verify HMAC signature: %s\n", err)
return
}
}
// OUTPUT:
}
```
source: [examples/dsig_readme_example_test.go](https://github.com/lestrrat-go/dsig/blob/v1/examples/dsig_readme_example_test.go)
<!-- END INCLUDE -->
# Supported Algorithms
| Constant | Algorithm | Key Type |
|----------|-----------|----------|
| `HMACWithSHA256` | HMAC using SHA-256 | []byte |
| `HMACWithSHA384` | HMAC using SHA-384 | []byte |
| `HMACWithSHA512` | HMAC using SHA-512 | []byte |
| `RSAPKCS1v15WithSHA256` | RSA PKCS#1 v1.5 using SHA-256 | *rsa.PrivateKey / *rsa.PublicKey |
| `RSAPKCS1v15WithSHA384` | RSA PKCS#1 v1.5 using SHA-384 | *rsa.PrivateKey / *rsa.PublicKey |
| `RSAPKCS1v15WithSHA512` | RSA PKCS#1 v1.5 using SHA-512 | *rsa.PrivateKey / *rsa.PublicKey |
| `RSAPSSWithSHA256` | RSA PSS using SHA-256 | *rsa.PrivateKey / *rsa.PublicKey |
| `RSAPSSWithSHA384` | RSA PSS using SHA-384 | *rsa.PrivateKey / *rsa.PublicKey |
| `RSAPSSWithSHA512` | RSA PSS using SHA-512 | *rsa.PrivateKey / *rsa.PublicKey |
| `ECDSAWithP256AndSHA256` | ECDSA using P-256 and SHA-256 | *ecdsa.PrivateKey / *ecdsa.PublicKey |
| `ECDSAWithP384AndSHA384` | ECDSA using P-384 and SHA-384 | *ecdsa.PrivateKey / *ecdsa.PublicKey |
| `ECDSAWithP521AndSHA512` | ECDSA using P-521 and SHA-512 | *ecdsa.PrivateKey / *ecdsa.PublicKey |
| `EdDSA` | EdDSA using Ed25519 or Ed448 | ed25519.PrivateKey / ed25519.PublicKey |
# Description
This library provides low-level digital signature operations. It does minimal parameter validation for performance, uses strongly typed APIs, and has minimal dependencies.
# Contributions
## Issues
For bug reports and feature requests, please include failing tests when possible.
## Pull Requests
Please include tests that exercise your changes.
# Related Libraries
* [github.com/lestrrat-go/jwx](https://github.com/lestrrat-go/jwx) - JOSE (JWA/JWE/JWK/JWS/JWT) implementation

37
vendor/github.com/lestrrat-go/dsig/algorithms.go generated vendored Normal file
View File

@@ -0,0 +1,37 @@
package dsig

// This file defines verbose algorithm name constants that can be mapped to
// by different standards (RFC 7518 / JOSE, FIDO, etc.) for interoperability.
//
// The algorithm names are intentionally verbose to avoid any ambiguity
// about the exact cryptographic operations being performed. They are the
// keys used with RegisterAlgorithm / GetAlgorithmInfo.
const (
	// HMAC signature algorithms: Hash-based Message Authentication Code
	// with the indicated hash function. Keys are raw []byte secrets.
	HMACWithSHA256 = "HMAC_WITH_SHA256"
	HMACWithSHA384 = "HMAC_WITH_SHA384"
	HMACWithSHA512 = "HMAC_WITH_SHA512"

	// RSA signature algorithms with PKCS#1 v1.5 padding and the indicated
	// hash function.
	RSAPKCS1v15WithSHA256 = "RSA_PKCS1v15_WITH_SHA256"
	RSAPKCS1v15WithSHA384 = "RSA_PKCS1v15_WITH_SHA384"
	RSAPKCS1v15WithSHA512 = "RSA_PKCS1v15_WITH_SHA512"

	// RSA signature algorithms with Probabilistic Signature Scheme (PSS)
	// padding and the indicated hash function.
	RSAPSSWithSHA256 = "RSA_PSS_WITH_SHA256"
	RSAPSSWithSHA384 = "RSA_PSS_WITH_SHA384"
	RSAPSSWithSHA512 = "RSA_PSS_WITH_SHA512"

	// ECDSA signature algorithms with the indicated curve and hash function.
	ECDSAWithP256AndSHA256 = "ECDSA_WITH_P256_AND_SHA256"
	ECDSAWithP384AndSHA384 = "ECDSA_WITH_P384_AND_SHA384"
	ECDSAWithP521AndSHA512 = "ECDSA_WITH_P521_AND_SHA512"

	// EdDSA: Edwards-curve Digital Signature Algorithm (Ed25519 or Ed448).
	EdDSA = "EDDSA"
)

45
vendor/github.com/lestrrat-go/dsig/crypto_signer.go generated vendored Normal file
View File

@@ -0,0 +1,45 @@
package dsig
import (
"crypto"
"crypto/rand"
"fmt"
"io"
)
// cryptosign is a low-level function that signs a payload using a crypto.Signer.
// If hash is crypto.Hash(0), the payload is signed directly without hashing.
// Otherwise, the payload is hashed using the specified hash function before signing.
//
// rr is an io.Reader that provides randomness for signing. If rr is nil, it defaults to rand.Reader.
func cryptosign(signer crypto.Signer, payload []byte, hash crypto.Hash, opts crypto.SignerOpts, rr io.Reader) ([]byte, error) {
if rr == nil {
rr = rand.Reader
}
var digest []byte
if hash == crypto.Hash(0) {
digest = payload
} else {
h := hash.New()
if _, err := h.Write(payload); err != nil {
return nil, fmt.Errorf(`failed to write payload to hash: %w`, err)
}
digest = h.Sum(nil)
}
return signer.Sign(rr, digest, opts)
}
// SignCryptoSigner generates a signature through the crypto.Signer
// interface, which makes it usable with hardware security modules, smart
// cards, and other crypto.Signer implementations.
//
// rr supplies randomness for signing; a nil rr falls back to rand.Reader.
// It returns the signature bytes, or an error when signing fails.
func SignCryptoSigner(signer crypto.Signer, raw []byte, h crypto.Hash, opts crypto.SignerOpts, rr io.Reader) ([]byte, error) {
	if signer != nil {
		return cryptosign(signer, raw, h, opts, rr)
	}
	return nil, fmt.Errorf("dsig.SignCryptoSigner: signer is nil")
}

224
vendor/github.com/lestrrat-go/dsig/dsig.go generated vendored Normal file
View File

@@ -0,0 +1,224 @@
// Package dsig provides digital signature operations for Go.
// It contains low-level signature generation and verification tools that
// can be used by other signing libraries
//
// The package follows these design principles:
// 1. Does minimal checking of input parameters (for performance); callers need to ensure that the parameters are valid.
// 2. All exported functions are strongly typed (i.e. they do not take `any` types unless they absolutely have to).
// 3. Does not rely on other high-level packages (standalone, except for internal packages).
package dsig
import (
"crypto"
"crypto/sha256"
"crypto/sha512"
"fmt"
"hash"
"sync"
)
// Family identifies the cryptographic algorithm family a registered
// signature algorithm belongs to.
type Family int

const (
	InvalidFamily Family = iota
	HMAC
	RSA
	ECDSA
	EdDSAFamily
	maxFamily
)

// String returns a human-readable name for the Family; unknown values
// (including InvalidFamily and maxFamily) render as "InvalidFamily".
func (f Family) String() string {
	switch f {
	case EdDSAFamily:
		return "EdDSA"
	case ECDSA:
		return "ECDSA"
	case RSA:
		return "RSA"
	case HMAC:
		return "HMAC"
	}
	return "InvalidFamily"
}
// AlgorithmInfo describes a registered digital signature algorithm: its
// cryptographic family plus family-specific metadata.
type AlgorithmInfo struct {
	Family Family // The cryptographic family (HMAC, RSA, ECDSA, EdDSA)
	Meta   any    // Family-specific metadata (one of the *FamilyMeta types below)
}

// HMACFamilyMeta contains metadata specific to HMAC algorithms.
type HMACFamilyMeta struct {
	HashFunc func() hash.Hash // Hash function constructor (e.g. sha256.New)
}

// RSAFamilyMeta contains metadata specific to RSA algorithms.
type RSAFamilyMeta struct {
	Hash crypto.Hash // Hash algorithm used to digest the payload
	PSS  bool        // Whether to use PSS padding (false = PKCS#1 v1.5)
}

// ECDSAFamilyMeta contains metadata specific to ECDSA algorithms.
type ECDSAFamilyMeta struct {
	Hash crypto.Hash // Hash algorithm used to digest the payload
}

// EdDSAFamilyMeta contains metadata specific to EdDSA algorithms.
// Currently EdDSA doesn't need specific metadata, but this provides extensibility.
type EdDSAFamilyMeta struct {
	// Reserved for future use
}

// algorithms holds every registered algorithm keyed by name; it is guarded
// by muAlgorithms so registration and lookup are safe for concurrent use.
var algorithms = make(map[string]AlgorithmInfo)
var muAlgorithms sync.RWMutex
// RegisterAlgorithm registers a digital signature algorithm under name.
//
// info.Meta must carry the family-specific metadata for the HMAC, RSA,
// and ECDSA families (HMACFamilyMeta, RSAFamilyMeta, and ECDSAFamilyMeta
// respectively). Metadata for other families is not validated.
func RegisterAlgorithm(name string, info AlgorithmInfo) error {
	muAlgorithms.Lock()
	defer muAlgorithms.Unlock()

	// Reject registrations whose metadata does not match the declared family.
	var familyName string
	var metaOK bool
	switch info.Family {
	case HMAC:
		familyName = "HMAC"
		_, metaOK = info.Meta.(HMACFamilyMeta)
	case RSA:
		familyName = "RSA"
		_, metaOK = info.Meta.(RSAFamilyMeta)
	case ECDSA:
		familyName = "ECDSA"
		_, metaOK = info.Meta.(ECDSAFamilyMeta)
	case EdDSAFamily:
		metaOK = true // EdDSA currently requires no metadata
	default:
		return fmt.Errorf("unsupported algorithm family %s for algorithm %s", info.Family, name)
	}
	if !metaOK {
		return fmt.Errorf("invalid %s metadata for algorithm %s", familyName, name)
	}
	algorithms[name] = info
	return nil
}
// GetAlgorithmInfo looks up the algorithm registered under name. The
// second return value reports whether the algorithm was found.
func GetAlgorithmInfo(name string) (AlgorithmInfo, bool) {
	muAlgorithms.RLock()
	info, ok := algorithms[name]
	muAlgorithms.RUnlock()
	return info, ok
}
// init registers the standard, always-available algorithm set.
func init() {
	// register panics on failure: a registration error at package init is
	// a programming bug, not a runtime condition.
	register := func(name string, info AlgorithmInfo) {
		if err := RegisterAlgorithm(name, info); err != nil {
			panic(fmt.Sprintf("failed to register algorithm %s: %v", name, err))
		}
	}
	hmacInfo := func(hf func() hash.Hash) AlgorithmInfo {
		return AlgorithmInfo{Family: HMAC, Meta: HMACFamilyMeta{HashFunc: hf}}
	}
	rsaInfo := func(h crypto.Hash, pss bool) AlgorithmInfo {
		return AlgorithmInfo{Family: RSA, Meta: RSAFamilyMeta{Hash: h, PSS: pss}}
	}
	ecdsaInfo := func(h crypto.Hash) AlgorithmInfo {
		return AlgorithmInfo{Family: ECDSA, Meta: ECDSAFamilyMeta{Hash: h}}
	}

	// HMAC algorithms
	register(HMACWithSHA256, hmacInfo(sha256.New))
	register(HMACWithSHA384, hmacInfo(sha512.New384))
	register(HMACWithSHA512, hmacInfo(sha512.New))
	// RSA PKCS#1 v1.5 algorithms
	register(RSAPKCS1v15WithSHA256, rsaInfo(crypto.SHA256, false))
	register(RSAPKCS1v15WithSHA384, rsaInfo(crypto.SHA384, false))
	register(RSAPKCS1v15WithSHA512, rsaInfo(crypto.SHA512, false))
	// RSA PSS algorithms
	register(RSAPSSWithSHA256, rsaInfo(crypto.SHA256, true))
	register(RSAPSSWithSHA384, rsaInfo(crypto.SHA384, true))
	register(RSAPSSWithSHA512, rsaInfo(crypto.SHA512, true))
	// ECDSA algorithms
	register(ECDSAWithP256AndSHA256, ecdsaInfo(crypto.SHA256))
	register(ECDSAWithP384AndSHA384, ecdsaInfo(crypto.SHA384))
	register(ECDSAWithP521AndSHA512, ecdsaInfo(crypto.SHA512))
	// EdDSA (Ed25519 / Ed448)
	register(EdDSA, AlgorithmInfo{Family: EdDSAFamily, Meta: EdDSAFamilyMeta{}})
}

200
vendor/github.com/lestrrat-go/dsig/ecdsa.go generated vendored Normal file
View File

@@ -0,0 +1,200 @@
package dsig
import (
"crypto"
"crypto/ecdsa"
"crypto/rand"
"encoding/asn1"
"fmt"
"io"
"math/big"
"github.com/lestrrat-go/dsig/internal/ecutil"
)
// ecdsaGetSignerKey resolves key into either a *ecdsa.PrivateKey (the
// direct signing path) or a generic crypto.Signer. The boolean result
// reports whether the crypto.Signer path should be taken.
func ecdsaGetSignerKey(key any) (*ecdsa.PrivateKey, crypto.Signer, bool, error) {
	if cs, ok := key.(crypto.Signer); ok {
		if !isValidECDSAKey(key) {
			return nil, nil, false, fmt.Errorf(`invalid key type %T for ECDSA algorithm`, key)
		}
		// Native ecdsa private keys skip the crypto.Signer indirection:
		// the direct path below is more efficient for them.
		switch key.(type) {
		case ecdsa.PrivateKey, *ecdsa.PrivateKey:
		default:
			return nil, cs, true, nil
		}
	}
	privkey, ok := key.(*ecdsa.PrivateKey)
	if !ok {
		return nil, nil, false, fmt.Errorf(`invalid key type %T. *ecdsa.PrivateKey is required`, key)
	}
	return privkey, nil, false, nil
}
// UnpackASN1ECDSASignature unpacks an ASN.1 encoded ECDSA signature into r and s values.
// This is typically used when working with crypto.Signer interfaces that return ASN.1 encoded signatures.
func UnpackASN1ECDSASignature(signed []byte, r, s *big.Int) error {
// Okay, this is silly, but hear me out. When we use the
// crypto.Signer interface, the PrivateKey is hidden.
// But we need some information about the key (its bit size).
//
// So while silly, we're going to have to make another call
// here and fetch the Public key.
// (This probably means that this information should be cached somewhere)
var p struct {
R *big.Int // TODO: get this from a pool?
S *big.Int
}
if _, err := asn1.Unmarshal(signed, &p); err != nil {
return fmt.Errorf(`failed to unmarshal ASN1 encoded signature: %w`, err)
}
r.Set(p.R)
s.Set(p.S)
return nil
}
// UnpackECDSASignature splits a JWS-format (RFC 7515) ECDSA signature —
// r||s as fixed-length big-endian byte arrays — into its r and s values,
// using pubkey's curve to determine the component length.
func UnpackECDSASignature(signature []byte, pubkey *ecdsa.PublicKey, r, s *big.Int) error {
	n := ecutil.CalculateKeySize(pubkey.Curve)
	if len(signature) != 2*n {
		return fmt.Errorf(`invalid signature length for curve %q`, pubkey.Curve.Params().Name)
	}
	r.SetBytes(signature[:n])
	s.SetBytes(signature[n:])
	return nil
}
// PackECDSASignature packs the r and s values from an ECDSA signature into a JWS-format byte slice.
// The output format follows RFC 7515: r||s as fixed-length byte arrays.
func PackECDSASignature(r *big.Int, sbig *big.Int, curveBits int) ([]byte, error) {
keyBytes := curveBits / 8
if curveBits%8 > 0 {
keyBytes++
}
// Serialize r and s into fixed-length bytes
rBytes := r.Bytes()
rBytesPadded := make([]byte, keyBytes)
copy(rBytesPadded[keyBytes-len(rBytes):], rBytes)
sBytes := sbig.Bytes()
sBytesPadded := make([]byte, keyBytes)
copy(sBytesPadded[keyBytes-len(sBytes):], sBytes)
// Output as r||s
return append(rBytesPadded, sBytesPadded...), nil
}
// SignECDSA produces a JWS-format (r||s) ECDSA signature over payload
// using the given private key and hash.
//
// rr supplies randomness for signing; a nil rr falls back to rand.Reader.
func SignECDSA(key *ecdsa.PrivateKey, payload []byte, h crypto.Hash, rr io.Reader) ([]byte, error) {
	if !isValidECDSAKey(key) {
		return nil, fmt.Errorf(`invalid key type %T for ECDSA algorithm`, key)
	}
	hasher := h.New()
	if _, err := hasher.Write(payload); err != nil {
		return nil, fmt.Errorf(`failed to write payload using ecdsa: %w`, err)
	}
	if rr == nil {
		rr = rand.Reader
	}
	// Sign the digest, then serialize (r, s) into the fixed-length JWS layout.
	r, s, err := ecdsa.Sign(rr, key, hasher.Sum(nil))
	if err != nil {
		return nil, fmt.Errorf(`failed to sign payload using ecdsa: %w`, err)
	}
	return PackECDSASignature(r, s, key.Curve.Params().BitSize)
}
// SignECDSACryptoSigner signs raw through a crypto.Signer (e.g. a hardware
// security module) and converts the resulting ASN.1 encoded signature into
// JWS format (r||s).
//
// rr supplies randomness for signing; a nil rr falls back to rand.Reader.
func SignECDSACryptoSigner(signer crypto.Signer, raw []byte, h crypto.Hash, rr io.Reader) ([]byte, error) {
	asn1Signed, err := SignCryptoSigner(signer, raw, h, h, rr)
	if err != nil {
		return nil, fmt.Errorf(`failed to sign payload using crypto.Signer: %w`, err)
	}
	return signECDSACryptoSigner(signer, asn1Signed)
}
// signECDSACryptoSigner converts an ASN.1 encoded ECDSA signature produced
// by a crypto.Signer into JWS format (r||s), using the signer's public key
// to determine the curve bit size.
func signECDSACryptoSigner(signer crypto.Signer, signed []byte) ([]byte, error) {
	cpub := signer.Public()
	pubkey, ok := cpub.(*ecdsa.PublicKey)
	if !ok {
		// Report the type we actually received. The previous code formatted
		// the nil *ecdsa.PublicKey left by the failed assertion, so %T
		// always printed "*ecdsa.PublicKey" and hid the real type.
		return nil, fmt.Errorf(`expected *ecdsa.PublicKey, got %T`, cpub)
	}
	curveBits := pubkey.Curve.Params().BitSize
	var r, s big.Int
	if err := UnpackASN1ECDSASignature(signed, &r, &s); err != nil {
		return nil, fmt.Errorf(`failed to unpack ASN1 encoded signature: %w`, err)
	}
	return PackECDSASignature(&r, &s, curveBits)
}
// ecdsaVerify hashes buf with h and checks the (r, s) signature against key.
func ecdsaVerify(key *ecdsa.PublicKey, buf []byte, h crypto.Hash, r, s *big.Int) error {
	hh := h.New()
	hh.Write(buf) // hash.Hash.Write never returns an error
	if ecdsa.Verify(key, hh.Sum(nil), r, s) {
		return nil
	}
	return NewVerificationError("invalid ECDSA signature")
}
// VerifyECDSA verifies a JWS-format (r||s) ECDSA signature over payload
// using the given public key and hash algorithm. payload should be the
// pre-computed signing input (typically header.payload).
func VerifyECDSA(key *ecdsa.PublicKey, payload, signature []byte, h crypto.Hash) error {
	var r, s big.Int
	err := UnpackECDSASignature(signature, key, &r, &s)
	if err != nil {
		return fmt.Errorf("dsig.VerifyECDSA: failed to unpack ECDSA signature: %w", err)
	}
	return ecdsaVerify(key, payload, h, &r, &s)
}
// VerifyECDSACryptoSigner verifies a JWS-format (r||s) ECDSA signature for
// crypto.Signer implementations such as hardware security modules, using
// the signer's public key. payload should be the pre-computed signing
// input (typically header.payload).
func VerifyECDSACryptoSigner(signer crypto.Signer, payload, signature []byte, h crypto.Hash) error {
	var pubkey *ecdsa.PublicKey
	switch cpub := signer.Public(); cpub := cpub.(type) {
	case ecdsa.PublicKey:
		pubkey = &cpub
	case *ecdsa.PublicKey:
		pubkey = cpub
	default:
		return fmt.Errorf(`dsig.VerifyECDSACryptoSigner: expected *ecdsa.PublicKey, got %T`, cpub)
	}
	var r, s big.Int
	if err := UnpackECDSASignature(signature, pubkey, &r, &s); err != nil {
		// The signature here is JWS-format (r||s); the previous message
		// incorrectly described it as ASN.1 encoded.
		return fmt.Errorf("dsig.VerifyECDSACryptoSigner: failed to unpack ECDSA signature: %w", err)
	}
	return ecdsaVerify(pubkey, payload, h, &r, &s)
}

44
vendor/github.com/lestrrat-go/dsig/eddsa.go generated vendored Normal file
View File

@@ -0,0 +1,44 @@
package dsig
import (
"crypto"
"crypto/ed25519"
"fmt"
)
// eddsaGetSigner extracts a crypto.Signer from key for EdDSA signing.
// ed25519.PrivateKey satisfies crypto.Signer directly; values implementing
// crypto.Signer must additionally pass isValidEDDSAKey.
func eddsaGetSigner(key any) (crypto.Signer, error) {
	if signer, ok := key.(crypto.Signer); ok {
		if !isValidEDDSAKey(key) {
			return nil, fmt.Errorf(`invalid key type %T for EdDSA algorithm`, key)
		}
		return signer, nil
	}
	// Fallback for values that did not satisfy crypto.Signer above
	// (e.g. pointer/non-pointer mismatches from callers).
	if privkey, ok := key.(ed25519.PrivateKey); ok {
		return privkey, nil
	}
	return nil, fmt.Errorf(`failed to retrieve ed25519.PrivateKey out of %T`, key)
}
// SignEdDSA generates an EdDSA (Ed25519) signature for the given payload.
// The raw parameter should be the pre-computed signing input (typically header.payload).
// EdDSA is deterministic and doesn't require additional hashing of the input.
func SignEdDSA(key ed25519.PrivateKey, payload []byte) ([]byte, error) {
return ed25519.Sign(key, payload), nil
}
// VerifyEdDSA verifies an EdDSA (Ed25519) signature for the given payload.
// This function verifies the signature using Ed25519 verification algorithm.
// The payload parameter should be the pre-computed signing input (typically header.payload).
// EdDSA is deterministic and provides strong security guarantees without requiring hash function selection.
func VerifyEdDSA(key ed25519.PublicKey, payload, signature []byte) error {
if !ed25519.Verify(key, payload, signature) {
return fmt.Errorf("invalid EdDSA signature")
}
return nil
}

45
vendor/github.com/lestrrat-go/dsig/hmac.go generated vendored Normal file
View File

@@ -0,0 +1,45 @@
package dsig
import (
"crypto/hmac"
"fmt"
"hash"
)
// toHMACKey extracts an HMAC key from key into dst, rejecting non-[]byte
// values and empty keys.
func toHMACKey(dst *[]byte, key any) error {
	keyBytes, ok := key.([]byte)
	switch {
	case !ok:
		return fmt.Errorf(`dsig.toHMACKey: invalid key type %T. []byte is required`, key)
	case len(keyBytes) == 0:
		return fmt.Errorf(`dsig.toHMACKey: missing key while signing payload`)
	}
	*dst = keyBytes
	return nil
}
// SignHMAC generates an HMAC signature for the given payload using the specified hash function and key.
// The raw parameter should be the pre-computed signing input (typically header.payload).
func SignHMAC(key, payload []byte, hfunc func() hash.Hash) ([]byte, error) {
h := hmac.New(hfunc, key)
if _, err := h.Write(payload); err != nil {
return nil, fmt.Errorf(`failed to write payload using hmac: %w`, err)
}
return h.Sum(nil), nil
}
// VerifyHMAC recomputes the HMAC of payload under key and compares it to
// signature in constant time.
func VerifyHMAC(key, payload, signature []byte, hfunc func() hash.Hash) error {
	expected, err := SignHMAC(key, payload, hfunc)
	if err != nil {
		return fmt.Errorf("failed to sign payload for verification: %w", err)
	}
	if hmac.Equal(signature, expected) {
		return nil
	}
	return NewVerificationError("invalid HMAC signature")
}

View File

@@ -0,0 +1,76 @@
// Package ecutil defines tools that help with elliptic curve related
// computation
package ecutil
import (
"crypto/elliptic"
"math/big"
"sync"
)
const (
	// Size of the largest buffer normally needed: the byte length of a
	// P-521 (EC521) coordinate, (521 / 8) + 1 = 66.
	ec521BufferSize = 66
)

// ecpointBufferPool recycles coordinate buffers so repeated EC point
// serialization does not allocate. Pooled buffers have zero length and at
// least ec521BufferSize capacity.
var ecpointBufferPool = sync.Pool{
	New: func() any {
		// In most cases the curve bit size will be less than this length
		// so allocate the maximum, and keep reusing
		buf := make([]byte, 0, ec521BufferSize)
		return &buf
	},
}

// getCrvFixedBuffer returns a buffer of exactly size bytes, reusing a
// pooled buffer when its capacity suffices.
func getCrvFixedBuffer(size int) []byte {
	//nolint:forcetypeassert
	buf := *(ecpointBufferPool.Get().(*[]byte))
	if cap(buf) < size {
		// The pooled buffer is too small (size beyond the P-521 maximum):
		// allocate a fresh one. The previous append-based growth computed
		// the extension from cap(buf) and could still leave cap(buf) < size,
		// making buf[:size] below panic.
		buf = make([]byte, 0, size)
	}
	return buf[:size]
}
// ReleaseECPointBuffer zeroes buf and returns it to the pool, so that its
// contents do not linger in reusable memory.
func ReleaseECPointBuffer(buf []byte) {
	buf = buf[:cap(buf)]
	// Zero the entire backing array. A range clear compiles to memclr, and
	// unlike the previous buf[0]=0 + doubling-copy trick it does not panic
	// when handed a zero-capacity buffer.
	for i := range buf {
		buf[i] = 0
	}
	buf = buf[:0]
	ecpointBufferPool.Put(&buf)
}
func CalculateKeySize(crv elliptic.Curve) int {
// We need to create a buffer that fits the entire curve.
// If the curve size is 66, that fits in 9 bytes. If the curve
// size is 64, it fits in 8 bytes.
bits := crv.Params().BitSize
// For most common cases we know before hand what the byte length
// is going to be. optimize
var inBytes int
switch bits {
case 224, 256, 384: // TODO: use constant?
inBytes = bits / 8
case 521:
inBytes = ec521BufferSize
default:
inBytes = bits / 8
if (bits % 8) != 0 {
inBytes++
}
}
return inBytes
}
// AllocECPointBuffer allocates a buffer for the given point in the given
// curve. This buffer should be released using the ReleaseECPointBuffer
// function.
func AllocECPointBuffer(v *big.Int, crv elliptic.Curve) []byte {
buf := getCrvFixedBuffer(CalculateKeySize(crv))
v.FillBytes(buf)
return buf
}

63
vendor/github.com/lestrrat-go/dsig/rsa.go generated vendored Normal file
View File

@@ -0,0 +1,63 @@
package dsig
import (
"crypto"
"crypto/rsa"
"fmt"
"io"
)
// rsaGetSignerCryptoSignerKey validates key for RSA use and reports
// whether it should be driven through the crypto.Signer interface.
func rsaGetSignerCryptoSignerKey(key any) (crypto.Signer, bool, error) {
	if !isValidRSAKey(key) {
		return nil, false, fmt.Errorf(`invalid key type %T for RSA algorithm`, key)
	}
	if cs, ok := key.(crypto.Signer); ok {
		return cs, true, nil
	}
	return nil, false, nil
}
// rsaPSSOptions returns the PSS options for RSA-PSS signatures with the specified hash.
// The salt length is set to equal the hash length as per RFC 7518.
func rsaPSSOptions(h crypto.Hash) rsa.PSSOptions {
return rsa.PSSOptions{
Hash: h,
SaltLength: rsa.PSSSaltLengthEqualsHash,
}
}
// SignRSA signs payload with the given RSA private key and hash. With pss
// set, RSA-PSS padding is used; otherwise PKCS#1 v1.5.
//
// rr optionally supplies randomness for signing; a nil rr falls back to
// rand.Reader.
func SignRSA(key *rsa.PrivateKey, payload []byte, h crypto.Hash, pss bool, rr io.Reader) ([]byte, error) {
	if !isValidRSAKey(key) {
		return nil, fmt.Errorf(`invalid key type %T for RSA algorithm`, key)
	}
	opts := crypto.SignerOpts(h)
	if pss {
		pssopts := rsaPSSOptions(h)
		opts = &pssopts
	}
	return cryptosign(key, payload, h, opts, rr)
}
// VerifyRSA verifies an RSA signature over payload using the given public
// key and hash algorithm. With pss set, RSA-PSS verification is used;
// otherwise PKCS#1 v1.5. payload should be the pre-computed signing input
// (typically header.payload).
func VerifyRSA(key *rsa.PublicKey, payload, signature []byte, h crypto.Hash, pss bool) error {
	if !isValidRSAKey(key) {
		return fmt.Errorf(`invalid key type %T for RSA algorithm`, key)
	}
	hasher := h.New()
	hasher.Write(payload) // hash.Hash.Write never returns an error
	digest := hasher.Sum(nil)
	if pss {
		// Reuse rsaPSSOptions so signing and verification cannot drift
		// apart; the options were previously duplicated inline here.
		opts := rsaPSSOptions(h)
		return rsa.VerifyPSS(key, h, digest, signature, &opts)
	}
	return rsa.VerifyPKCS1v15(key, h, digest, signature)
}

100
vendor/github.com/lestrrat-go/dsig/sign.go generated vendored Normal file
View File

@@ -0,0 +1,100 @@
package dsig
import (
"crypto"
"crypto/rsa"
"fmt"
"io"
)
// Sign generates a digital signature over payload using key and the
// algorithm named by alg.
//
// Only the signers registered by the dsig package itself are consulted;
// custom signers registered by the user are not supported here.
//
// rr supplies randomness for algorithms that need it and may be nil, in
// which case rand.Reader is used. Most callers can simply pass nil.
func Sign(key any, alg string, payload []byte, rr io.Reader) ([]byte, error) {
	info, found := GetAlgorithmInfo(alg)
	if !found {
		return nil, fmt.Errorf(`dsig.Sign: unsupported signature algorithm %q`, alg)
	}
	switch info.Family {
	case HMAC:
		return dispatchHMACSign(key, info, payload)
	case ECDSA:
		return dispatchECDSASign(key, info, payload, rr)
	case RSA:
		return dispatchRSASign(key, info, payload, rr)
	case EdDSAFamily:
		return dispatchEdDSASign(key, info, payload, rr)
	}
	return nil, fmt.Errorf(`dsig.Sign: unsupported signature family %q`, info.Family)
}
// dispatchHMACSign coerces key into raw HMAC key bytes and signs payload
// with the hash described by the algorithm metadata.
func dispatchHMACSign(key any, info AlgorithmInfo, payload []byte) ([]byte, error) {
	meta, ok := info.Meta.(HMACFamilyMeta)
	if !ok {
		return nil, fmt.Errorf(`dsig.Sign: invalid HMAC metadata`)
	}
	var raw []byte
	if err := toHMACKey(&raw, key); err != nil {
		return nil, fmt.Errorf(`dsig.Sign: %w`, err)
	}
	return SignHMAC(raw, payload, meta.HashFunc)
}
// dispatchRSASign signs payload with an RSA key, preferring the
// crypto.Signer route when key implements it, and falling back to a
// concrete *rsa.PrivateKey otherwise.
func dispatchRSASign(key any, info AlgorithmInfo, payload []byte, rr io.Reader) ([]byte, error) {
	meta, ok := info.Meta.(RSAFamilyMeta)
	if !ok {
		return nil, fmt.Errorf(`dsig.Sign: invalid RSA metadata`)
	}
	signer, viaSigner, err := rsaGetSignerCryptoSignerKey(key)
	if err != nil {
		return nil, fmt.Errorf(`dsig.Sign: %w`, err)
	}
	if viaSigner {
		var opts crypto.SignerOpts = meta.Hash
		if meta.PSS {
			pssOpts := rsaPSSOptions(meta.Hash)
			opts = &pssOpts
		}
		return SignCryptoSigner(signer, payload, meta.Hash, opts, rr)
	}
	privkey, ok := key.(*rsa.PrivateKey)
	if !ok {
		return nil, fmt.Errorf(`dsig.Sign: invalid key type %T. *rsa.PrivateKey is required`, key)
	}
	return SignRSA(privkey, payload, meta.Hash, meta.PSS, rr)
}
// dispatchEdDSASign signs payload with an EdDSA-capable crypto.Signer.
// crypto.Hash(0) indicates that the message is signed directly, without
// pre-hashing.
func dispatchEdDSASign(key any, _ AlgorithmInfo, payload []byte, rr io.Reader) ([]byte, error) {
	signer, err := eddsaGetSigner(key)
	if err != nil {
		return nil, fmt.Errorf(`dsig.Sign: %w`, err)
	}
	return SignCryptoSigner(signer, payload, crypto.Hash(0), crypto.Hash(0), rr)
}
// dispatchECDSASign signs payload with an ECDSA key, choosing between the
// crypto.Signer route and a concrete private key as determined by
// ecdsaGetSignerKey.
func dispatchECDSASign(key any, info AlgorithmInfo, payload []byte, rr io.Reader) ([]byte, error) {
	meta, ok := info.Meta.(ECDSAFamilyMeta)
	if !ok {
		return nil, fmt.Errorf(`dsig.Sign: invalid ECDSA metadata`)
	}
	privkey, signer, viaSigner, err := ecdsaGetSignerKey(key)
	if err != nil {
		return nil, fmt.Errorf(`dsig.Sign: %w`, err)
	}
	if viaSigner {
		return SignECDSACryptoSigner(signer, payload, meta.Hash, rr)
	}
	return SignECDSA(privkey, payload, meta.Hash, rr)
}

66
vendor/github.com/lestrrat-go/dsig/validation.go generated vendored Normal file
View File

@@ -0,0 +1,66 @@
package dsig
import (
	"crypto/ecdsa"
	"crypto/ed25519"
	"crypto/rsa"
	"errors"
)
// isValidRSAKey validates that the provided key type is appropriate for RSA algorithms.
// It returns false if the key is clearly incompatible (e.g., ECDSA or EdDSA keys).
func isValidRSAKey(key any) bool {
switch key.(type) {
case
ecdsa.PrivateKey, *ecdsa.PrivateKey,
ed25519.PrivateKey:
// these are NOT ok for RSA algorithms
return false
}
return true
}
// isValidECDSAKey validates that the provided key type is appropriate for ECDSA algorithms.
// It returns false if the key is clearly incompatible (e.g., RSA or EdDSA keys).
func isValidECDSAKey(key any) bool {
switch key.(type) {
case
ed25519.PrivateKey,
rsa.PrivateKey, *rsa.PrivateKey:
// these are NOT ok for ECDSA algorithms
return false
}
return true
}
// isValidEDDSAKey validates that the provided key type is appropriate for EdDSA algorithms.
// It returns false if the key is clearly incompatible (e.g., RSA or ECDSA keys).
func isValidEDDSAKey(key any) bool {
switch key.(type) {
case
ecdsa.PrivateKey, *ecdsa.PrivateKey,
rsa.PrivateKey, *rsa.PrivateKey:
// these are NOT ok for EdDSA algorithms
return false
}
return true
}
// VerificationError represents an error that occurred during signature verification.
type VerificationError struct {
message string
}
func (e *VerificationError) Error() string {
return e.message
}
// NewVerificationError creates a new verification error with the given message.
func NewVerificationError(message string) error {
return &VerificationError{message: message}
}
// IsVerificationError checks if the given error is a verification error.
func IsVerificationError(err error) bool {
_, ok := err.(*VerificationError)
return ok
}

134
vendor/github.com/lestrrat-go/dsig/verify.go generated vendored Normal file
View File

@@ -0,0 +1,134 @@
package dsig
import (
"crypto"
"crypto/ecdsa"
"crypto/ed25519"
"crypto/rsa"
"fmt"
)
// Verify verifies a digital signature over payload using key and the
// algorithm named by alg.
//
// Only the verifiers registered by the dsig package itself are consulted;
// custom verifiers registered by the user are not supported here.
func Verify(key any, alg string, payload, signature []byte) error {
	info, found := GetAlgorithmInfo(alg)
	if !found {
		return fmt.Errorf(`dsig.Verify: unsupported signature algorithm %q`, alg)
	}
	switch info.Family {
	case HMAC:
		return dispatchHMACVerify(key, info, payload, signature)
	case ECDSA:
		return dispatchECDSAVerify(key, info, payload, signature)
	case RSA:
		return dispatchRSAVerify(key, info, payload, signature)
	case EdDSAFamily:
		return dispatchEdDSAVerify(key, info, payload, signature)
	}
	return fmt.Errorf(`dsig.Verify: unsupported signature family %q`, info.Family)
}
// dispatchHMACVerify coerces key into raw HMAC key bytes and verifies the
// signature over payload.
func dispatchHMACVerify(key any, info AlgorithmInfo, payload, signature []byte) error {
	meta, ok := info.Meta.(HMACFamilyMeta)
	if !ok {
		return fmt.Errorf(`dsig.Verify: invalid HMAC metadata`)
	}
	var raw []byte
	if err := toHMACKey(&raw, key); err != nil {
		return fmt.Errorf(`dsig.Verify: %w`, err)
	}
	return VerifyHMAC(raw, payload, signature, meta.HashFunc)
}
// dispatchRSAVerify extracts an *rsa.PublicKey from key and verifies the
// signature. key may be the public key itself, or a crypto.Signer whose
// Public() yields an RSA public key.
func dispatchRSAVerify(key any, info AlgorithmInfo, payload, signature []byte) error {
	meta, ok := info.Meta.(RSAFamilyMeta)
	if !ok {
		return fmt.Errorf(`dsig.Verify: invalid RSA metadata`)
	}
	var pubkey *rsa.PublicKey
	switch k := key.(type) {
	case crypto.Signer:
		switch pub := k.Public().(type) {
		case rsa.PublicKey:
			pubkey = &pub
		case *rsa.PublicKey:
			pubkey = pub
		default:
			return fmt.Errorf(`dsig.Verify: failed to retrieve rsa.PublicKey out of crypto.Signer %T`, key)
		}
	case *rsa.PublicKey:
		pubkey = k
	default:
		return fmt.Errorf(`dsig.Verify: failed to retrieve *rsa.PublicKey out of %T`, key)
	}
	return VerifyRSA(pubkey, payload, signature, meta.Hash, meta.PSS)
}
// dispatchECDSAVerify verifies an ECDSA signature, using the crypto.Signer
// route only when ecdsaGetVerifierKey decides the key is not a plain
// *ecdsa.PublicKey.
func dispatchECDSAVerify(key any, info AlgorithmInfo, payload, signature []byte) error {
	meta, ok := info.Meta.(ECDSAFamilyMeta)
	if !ok {
		return fmt.Errorf(`dsig.Verify: invalid ECDSA metadata`)
	}
	pubkey, signer, viaSigner, err := ecdsaGetVerifierKey(key)
	if err != nil {
		return fmt.Errorf(`dsig.Verify: %w`, err)
	}
	if viaSigner {
		return VerifyECDSACryptoSigner(signer, payload, signature, meta.Hash)
	}
	return VerifyECDSA(pubkey, payload, signature, meta.Hash)
}
// dispatchEdDSAVerify extracts an ed25519.PublicKey from key and verifies
// the signature. key may be the public key itself, or a crypto.Signer
// whose Public() yields an ed25519 public key.
func dispatchEdDSAVerify(key any, _ AlgorithmInfo, payload, signature []byte) error {
	var pubkey ed25519.PublicKey
	switch k := key.(type) {
	case crypto.Signer:
		v := k.Public()
		pub, ok := v.(ed25519.PublicKey)
		if !ok {
			return fmt.Errorf(`dsig.Verify: expected crypto.Signer.Public() to return ed25519.PublicKey, but got %T`, v)
		}
		pubkey = pub
	case ed25519.PublicKey:
		pubkey = k
	default:
		return fmt.Errorf(`dsig.Verify: failed to retrieve ed25519.PublicKey out of %T`, key)
	}
	return VerifyEdDSA(pubkey, payload, signature)
}
func ecdsaGetVerifierKey(key any) (*ecdsa.PublicKey, crypto.Signer, bool, error) {
cs, isCryptoSigner := key.(crypto.Signer)
if isCryptoSigner {
switch key.(type) {
case ecdsa.PublicKey, *ecdsa.PublicKey:
// if it's ecdsa.PublicKey, it's more efficient to
// go through the non-crypto.Signer route. Set isCryptoSigner to false
isCryptoSigner = false
}
}
if isCryptoSigner {
return nil, cs, true, nil
}
pubkey, ok := key.(*ecdsa.PublicKey)
if !ok {
return nil, nil, false, fmt.Errorf(`invalid key type %T. *ecdsa.PublicKey is required`, key)
}
return pubkey, nil, false, nil
}

15
vendor/github.com/lestrrat-go/httpcc/.gitignore generated vendored Normal file
View File

@@ -0,0 +1,15 @@
# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib
# Test binary, built with `go test -c`
*.test
# Output of the go coverage tool, specifically when used with LiteIDE
*.out
# Dependency directories (remove the comment below to include it)
# vendor/

21
vendor/github.com/lestrrat-go/httpcc/LICENSE generated vendored Normal file
View File

@@ -0,0 +1,21 @@
MIT License
Copyright (c) 2020 lestrrat-go
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

35
vendor/github.com/lestrrat-go/httpcc/README.md generated vendored Normal file
View File

@@ -0,0 +1,35 @@
httpcc
======
Parses HTTP/1.1 Cache-Control header, and returns a struct that is convenient
for the end-user to do what they will with.
# Parsing the HTTP Request
```go
dir, err := httpcc.ParseRequest(req.Header.Get(`Cache-Control`))
// dir.MaxAge() uint64, bool
// dir.MaxStale() uint64, bool
// dir.MinFresh() uint64, bool
// dir.NoCache() bool
// dir.NoStore() bool
// dir.NoTransform() bool
// dir.OnlyIfCached() bool
// dir.Extensions() map[string]string
```
# Parsing the HTTP Response
```go
directives, err := httpcc.ParseResponse(res.Header.Get(`Cache-Control`))
// dir.MaxAge() uint64, bool
// dir.MustRevalidate() bool
// dir.NoCache() []string
// dir.NoStore() bool
// dir.NoTransform() bool
// dir.Public() bool
// dir.Private() bool
// dir.SMaxAge() uint64, bool
// dir.Extensions() map[string]string
```

117
vendor/github.com/lestrrat-go/httpcc/directives.go generated vendored Normal file
View File

@@ -0,0 +1,117 @@
package httpcc
// RequestDirective holds the parsed Cache-Control directives of an HTTP
// request. Numeric directives are stored as pointers so that an absent
// directive can be told apart from an explicit zero.
type RequestDirective struct {
	maxAge       *uint64
	maxStale     *uint64
	minFresh     *uint64
	noCache      bool
	noStore      bool
	noTransform  bool
	onlyIfCached bool
	extensions   map[string]string
}

// MaxAge returns the max-age value and whether the directive was present.
func (r *RequestDirective) MaxAge() (uint64, bool) {
	if r.maxAge == nil {
		return 0, false
	}
	return *r.maxAge, true
}

// MaxStale returns the max-stale value and whether the directive was present.
func (r *RequestDirective) MaxStale() (uint64, bool) {
	if r.maxStale == nil {
		return 0, false
	}
	return *r.maxStale, true
}

// MinFresh returns the min-fresh value and whether the directive was present.
func (r *RequestDirective) MinFresh() (uint64, bool) {
	if r.minFresh == nil {
		return 0, false
	}
	return *r.minFresh, true
}

// NoCache reports whether the no-cache directive was present.
func (r *RequestDirective) NoCache() bool { return r.noCache }

// NoStore reports whether the no-store directive was present.
func (r *RequestDirective) NoStore() bool { return r.noStore }

// NoTransform reports whether the no-transform directive was present.
func (r *RequestDirective) NoTransform() bool { return r.noTransform }

// OnlyIfCached reports whether the only-if-cached directive was present.
func (r *RequestDirective) OnlyIfCached() bool { return r.onlyIfCached }

// Extensions returns the map of unrecognized directives.
func (r *RequestDirective) Extensions() map[string]string { return r.extensions }

// Extension returns the value of the unrecognized directive s.
func (r *RequestDirective) Extension(s string) string { return r.extensions[s] }
// ResponseDirective holds the parsed Cache-Control directives of an HTTP
// response. Numeric directives are stored as pointers so that an absent
// directive can be told apart from an explicit zero; no-cache and private
// carry optional field-name lists.
type ResponseDirective struct {
	maxAge          *uint64
	noCache         []string
	noStore         bool
	noTransform     bool
	public          bool
	private         []string
	proxyRevalidate bool
	sMaxAge         *uint64
	extensions      map[string]string
}

// MaxAge returns the max-age value and whether the directive was present.
func (r *ResponseDirective) MaxAge() (uint64, bool) {
	if r.maxAge == nil {
		return 0, false
	}
	return *r.maxAge, true
}

// NoCache returns the field names attached to the no-cache directive.
func (r *ResponseDirective) NoCache() []string { return r.noCache }

// NoStore reports whether the no-store directive was present.
func (r *ResponseDirective) NoStore() bool { return r.noStore }

// NoTransform reports whether the no-transform directive was present.
func (r *ResponseDirective) NoTransform() bool { return r.noTransform }

// Public reports whether the public directive was present.
func (r *ResponseDirective) Public() bool { return r.public }

// Private returns the field names attached to the private directive.
func (r *ResponseDirective) Private() []string { return r.private }

// ProxyRevalidate reports whether the proxy-revalidate directive was present.
func (r *ResponseDirective) ProxyRevalidate() bool { return r.proxyRevalidate }

// SMaxAge returns the s-maxage value and whether the directive was present.
func (r *ResponseDirective) SMaxAge() (uint64, bool) {
	if r.sMaxAge == nil {
		return 0, false
	}
	return *r.sMaxAge, true
}

// Extensions returns the map of unrecognized directives.
func (r *ResponseDirective) Extensions() map[string]string { return r.extensions }

// Extension returns the value of the unrecognized directive s.
func (r *ResponseDirective) Extension(s string) string { return r.extensions[s] }

310
vendor/github.com/lestrrat-go/httpcc/httpcc.go generated vendored Normal file
View File

@@ -0,0 +1,310 @@
package httpcc
import (
"bufio"
"fmt"
"strconv"
"strings"
"unicode/utf8"
)
const (
	// Request Cache-Control directives (names also used as the
	// canonical, lowercase match keys in ParseRequest).
	MaxAge       = "max-age" // used in response as well
	MaxStale     = "max-stale"
	MinFresh     = "min-fresh"
	NoCache      = "no-cache" // used in response as well
	NoStore      = "no-store" // used in response as well
	NoTransform  = "no-transform" // used in response as well
	OnlyIfCached = "only-if-cached"

	// Response Cache-Control directives.
	MustRevalidate  = "must-revalidate"
	Public          = "public"
	Private         = "private"
	ProxyRevalidate = "proxy-revalidate"
	SMaxAge         = "s-maxage" // note: "s-maxage", not "s-max-age"
)
// TokenPair is a single parsed Cache-Control directive: its name and,
// when present, its argument value.
type TokenPair struct {
	Name  string
	Value string
}

// TokenValuePolicy describes what kind of argument a directive accepts.
type TokenValuePolicy int

const (
	NoArgument       TokenValuePolicy = iota // directive takes no argument
	TokenOnly                                // bare token only; quoted strings rejected
	QuotedStringOnly                         // quoted string only; bare tokens rejected
	AnyTokenValue                            // bare token or quoted string accepted
)

// directiveValidator reports the argument policy for a directive name.
type directiveValidator interface {
	Validate(string) TokenValuePolicy
}

// directiveValidatorFn adapts a plain function to the directiveValidator
// interface.
type directiveValidatorFn func(string) TokenValuePolicy

func (fn directiveValidatorFn) Validate(ccd string) TokenValuePolicy {
	return fn(ccd)
}
// responseDirectiveValidator returns the argument policy for a response
// Cache-Control directive name. Unrecognized directives may carry any
// token value.
func responseDirectiveValidator(s string) TokenValuePolicy {
	switch s {
	case MaxAge, SMaxAge:
		return TokenOnly
	case NoCache, Private:
		return QuotedStringOnly
	case MustRevalidate, NoStore, NoTransform, Public, ProxyRevalidate:
		return NoArgument
	}
	return AnyTokenValue
}
// requestDirectiveValidator returns the argument policy for a request
// Cache-Control directive name. Unrecognized directives may carry any
// token value.
func requestDirectiveValidator(s string) TokenValuePolicy {
	switch s {
	case NoCache, NoStore, NoTransform, OnlyIfCached:
		return NoArgument
	case MaxAge, MaxStale, MinFresh:
		return TokenOnly
	}
	return AnyTokenValue
}
// ParseRequestDirective parses a single request Cache-Control directive
// token (e.g. "max-age=60"), applying the request directive value
// policies.
func ParseRequestDirective(s string) (*TokenPair, error) {
	return parseDirective(s, directiveValidatorFn(requestDirectiveValidator))
}

// ParseResponseDirective parses a single response Cache-Control directive
// token, applying the response directive value policies.
func ParseResponseDirective(s string) (*TokenPair, error) {
	return parseDirective(s, directiveValidatorFn(responseDirectiveValidator))
}
// parseDirective parses a single `name` or `name=value` directive token.
// The value is validated against the policy ccd reports for the directive
// name: quoted strings are unquoted, and policy violations (e.g. a quoted
// string where only a bare token is allowed) are reported as errors.
func parseDirective(s string, ccd directiveValidator) (*TokenPair, error) {
	s = strings.TrimSpace(s)

	i := strings.IndexByte(s, '=')
	if i == -1 {
		return &TokenPair{Name: s}, nil
	}

	pair := &TokenPair{Name: strings.TrimSpace(s[:i])}
	v := strings.TrimSpace(s[i+1:])
	if v == "" {
		// `key=` feels like it's a parse error, but it's HTTP...
		// for now, return as if nothing happened.
		//
		// NOTE: the previous guard here (`len(s) <= i`) could never fire
		// because i is always a valid index into s, so an empty value
		// reached the v[0] accesses below and panicked.
		return pair, nil
	}

	switch ccd.Validate(pair.Name) {
	case TokenOnly:
		if v[0] == '"' {
			return nil, fmt.Errorf(`invalid value for %s (quoted string not allowed)`, pair.Name)
		}
	case QuotedStringOnly: // quoted-string only
		if v[0] != '"' {
			return nil, fmt.Errorf(`invalid value for %s (bare token not allowed)`, pair.Name)
		}
		tmp, err := strconv.Unquote(v)
		if err != nil {
			return nil, fmt.Errorf(`malformed quoted string in token`)
		}
		v = tmp
	case AnyTokenValue:
		if v[0] == '"' {
			tmp, err := strconv.Unquote(v)
			if err != nil {
				return nil, fmt.Errorf(`malformed quoted string in token`)
			}
			v = tmp
		}
	case NoArgument:
		if len(v) > 0 {
			return nil, fmt.Errorf(`received argument to directive %s`, pair.Name)
		}
	}

	pair.Value = v
	return pair, nil
}
// ParseResponseDirectives parses a full Cache-Control response header
// value into its individual directives.
func ParseResponseDirectives(s string) ([]*TokenPair, error) {
	return parseDirectives(s, ParseResponseDirective)
}

// ParseRequestDirectives parses a full Cache-Control request header
// value into its individual directives.
func ParseRequestDirectives(s string) ([]*TokenPair, error) {
	return parseDirectives(s, ParseRequestDirective)
}
// parseDirectives splits s on commas (trimming surrounding white space)
// and parses each piece with p, returning the parsed tokens in order.
func parseDirectives(s string, p func(string) (*TokenPair, error)) ([]*TokenPair, error) {
	var tokens []*TokenPair

	scanner := bufio.NewScanner(strings.NewReader(s))
	scanner.Split(scanCommaSeparatedWords)
	for scanner.Scan() {
		pair, err := p(scanner.Text())
		if err != nil {
			return nil, fmt.Errorf(`failed to parse token #%d: %w`, len(tokens)+1, err)
		}
		tokens = append(tokens, pair)
	}
	return tokens, nil
}
// isSpace reports whether r is a Unicode white space character, without
// depending on the unicode package. The set is cross-checked against
// unicode.IsSpace in the tests.
func isSpace(r rune) bool {
	switch r {
	// ASCII controls and space, two Latin-1 oddballs, and the scattered
	// high-valued white space code points.
	case ' ', '\t', '\n', '\v', '\f', '\r',
		'\u0085', '\u00A0',
		'\u1680', '\u2028', '\u2029', '\u202f', '\u205f', '\u3000':
		return true
	}
	// U+2000..U+200A: the typographic fixed-width spaces.
	return '\u2000' <= r && r <= '\u200a'
}
// scanCommaSeparatedWords is a bufio.SplitFunc that emits comma-separated
// words with surrounding white space removed.
func scanCommaSeparatedWords(data []byte, atEOF bool) (advance int, token []byte, err error) {
	// Skip leading spaces.
	start := 0
	for width := 0; start < len(data); start += width {
		var r rune
		r, width = utf8.DecodeRune(data[start:])
		if !isSpace(r) {
			break
		}
	}
	// Scan until we find a comma. Keep track of the number of BYTES of
	// consecutive trailing white space, so it can be trimmed from the end
	// of the emitted word.
	//
	// (This previously counted runes (ws++), which mis-trimmed words whose
	// trailing space was a multi-byte rune such as U+00A0: subtracting a
	// rune count from a byte index left part of the space in the token.)
	var ws int
	for width, i := 0, start; i < len(data); i += width {
		var r rune
		r, width = utf8.DecodeRune(data[i:])
		switch {
		case isSpace(r):
			ws += width
		case r == ',':
			return i + width, data[start : i-ws], nil
		default:
			ws = 0
		}
	}
	// If we're at EOF, we have a final, non-empty, non-terminated word. Return it.
	if atEOF && len(data) > start {
		return len(data), data[start : len(data)-ws], nil
	}
	// Request more data.
	return start, nil, nil
}
// ParseRequest parses the content of the `Cache-Control` header of an
// HTTP Request. Unknown directives are collected into the extensions map
// rather than rejected.
func ParseRequest(v string) (*RequestDirective, error) {
	var dir RequestDirective
	// Unknown directives are assigned into this map in the default case
	// below; without this initialization the assignment panics on a nil
	// map. (ParseResponse already initializes its map; ParseRequest
	// previously did not.)
	dir.extensions = make(map[string]string)

	tokens, err := ParseRequestDirectives(v)
	if err != nil {
		return nil, fmt.Errorf(`failed to parse tokens: %w`, err)
	}

	for _, token := range tokens {
		name := strings.ToLower(token.Name)
		switch name {
		case MaxAge:
			iv, err := strconv.ParseUint(token.Value, 10, 64)
			if err != nil {
				return nil, fmt.Errorf(`failed to parse max-age: %w`, err)
			}
			dir.maxAge = &iv
		case MaxStale:
			iv, err := strconv.ParseUint(token.Value, 10, 64)
			if err != nil {
				return nil, fmt.Errorf(`failed to parse max-stale: %w`, err)
			}
			dir.maxStale = &iv
		case MinFresh:
			iv, err := strconv.ParseUint(token.Value, 10, 64)
			if err != nil {
				return nil, fmt.Errorf(`failed to parse min-fresh: %w`, err)
			}
			dir.minFresh = &iv
		case NoCache:
			dir.noCache = true
		case NoStore:
			dir.noStore = true
		case NoTransform:
			dir.noTransform = true
		case OnlyIfCached:
			dir.onlyIfCached = true
		default:
			dir.extensions[token.Name] = token.Value
		}
	}
	return &dir, nil
}
// ParseResponse parses the content of the `Cache-Control` header of an
// HTTP Response. Unknown directives are collected into the extensions map
// rather than rejected.
func ParseResponse(v string) (*ResponseDirective, error) {
	tokens, err := ParseResponseDirectives(v)
	if err != nil {
		return nil, fmt.Errorf(`failed to parse tokens: %w`, err)
	}

	// no-cache and private may carry an optional comma-separated list of
	// field names as their value.
	splitFields := func(s string) []string {
		var fields []string
		sc := bufio.NewScanner(strings.NewReader(s))
		sc.Split(scanCommaSeparatedWords)
		for sc.Scan() {
			fields = append(fields, sc.Text())
		}
		return fields
	}

	dir := ResponseDirective{extensions: make(map[string]string)}
	for _, token := range tokens {
		switch strings.ToLower(token.Name) {
		case MaxAge:
			iv, err := strconv.ParseUint(token.Value, 10, 64)
			if err != nil {
				return nil, fmt.Errorf(`failed to parse max-age: %w`, err)
			}
			dir.maxAge = &iv
		case NoCache:
			dir.noCache = append(dir.noCache, splitFields(token.Value)...)
		case NoStore:
			dir.noStore = true
		case NoTransform:
			dir.noTransform = true
		case Public:
			dir.public = true
		case Private:
			dir.private = append(dir.private, splitFields(token.Value)...)
		case ProxyRevalidate:
			dir.proxyRevalidate = true
		case SMaxAge:
			iv, err := strconv.ParseUint(token.Value, 10, 64)
			if err != nil {
				return nil, fmt.Errorf(`failed to parse s-maxage: %w`, err)
			}
			dir.sMaxAge = &iv
		default:
			dir.extensions[token.Name] = token.Value
		}
	}
	return &dir, nil
}

15
vendor/github.com/lestrrat-go/httprc/v3/.gitignore generated vendored Normal file
View File

@@ -0,0 +1,15 @@
# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib
# Test binary, built with `go test -c`
*.test
# Output of the go coverage tool, specifically when used with LiteIDE
*.out
# Dependency directories (remove the comment below to include it)
# vendor/

95
vendor/github.com/lestrrat-go/httprc/v3/.golangci.yml generated vendored Normal file
View File

@@ -0,0 +1,95 @@
version: "2"
linters:
default: all
disable:
- cyclop
- depguard
- dupl
- errorlint
- exhaustive
- forbidigo
- funcorder
- funlen
- gochecknoglobals
- gochecknoinits
- gocognit
- gocritic
- gocyclo
- godot
- godox
- gosec
- gosmopolitan
- govet
- inamedparam
- ireturn
- lll
- maintidx
- makezero
- mnd
- nakedret
- nestif
- nlreturn
- noinlineerr
- nonamedreturns
- paralleltest
- tagliatelle
- testpackage
- thelper
- varnamelen
- wrapcheck
- wsl
- wsl_v5
settings:
govet:
disable:
- shadow
- fieldalignment
enable-all: true
exclusions:
generated: lax
presets:
- comments
- common-false-positives
- legacy
- std-error-handling
rules:
- linters:
- staticcheck
path: /*.go
text: 'ST1003: should not use underscores in package names'
- linters:
- revive
path: /*.go
text: don't use an underscore in package name
- linters:
- contextcheck
- exhaustruct
path: /*.go
- linters:
- errcheck
path: /main.go
- linters:
- errcheck
- errchkjson
- forcetypeassert
path: /*_test.go
- linters:
- forbidigo
path: /*_example_test.go
paths:
- third_party$
- builtin$
- examples$
issues:
max-issues-per-linter: 0
max-same-issues: 0
formatters:
enable:
- gofmt
- goimports
exclusions:
generated: lax
paths:
- third_party$
- builtin$
- examples$

30
vendor/github.com/lestrrat-go/httprc/v3/Changes generated vendored Normal file
View File

@@ -0,0 +1,30 @@
Changes
=======
v3.0.1 18 Aug 2025
* Refresh() no longer requires the resource to be ready.
v3.0.0 5 Jun 2025
[Breaking Changes]
* The entire API has been re-imagined for Go versions that allow typed parameters
v2.0.0 19 Feb 2024
[Breaking Changes]
* `Fetcher` type is no longer available. You probably want to provide
a custom HTTP client instead (via httprc.WithHTTPClient()).
*
v1.0.4 19 Jul 2022
* Fix sloppy API breakage
v1.0.3 19 Jul 2022
* Fix queue insertion in the middle of the queue (#7)
v1.0.2 13 Jun 2022
* Properly release a lock when the fetch fails (#5)
v1.0.1 29 Mar 2022
* Bump dependency for github.com/lestrrat-go/httpcc to v1.0.1
v1.0.0 29 Mar 2022
* Initial release, refactored out of github.com/lestrrat-go/jwx

21
vendor/github.com/lestrrat-go/httprc/v3/LICENSE generated vendored Normal file
View File

@@ -0,0 +1,21 @@
MIT License
Copyright (c) 2022 lestrrat
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

172
vendor/github.com/lestrrat-go/httprc/v3/README.md generated vendored Normal file
View File

@@ -0,0 +1,172 @@
# github.com/lestrrat-go/httprc/v3 ![](https://github.com/lestrrat-go/httprc/v3/workflows/CI/badge.svg) [![Go Reference](https://pkg.go.dev/badge/github.com/lestrrat-go/httprc/v3.svg)](https://pkg.go.dev/github.com/lestrrat-go/httprc/v3)
`httprc` is a HTTP "Refresh" Cache. Its aim is to cache a remote resource that
can be fetched via HTTP, but keep the cached content up-to-date based on periodic
refreshing.
# Client
A `httprc.Client` object is comprised of 3 parts: The user-facing controller API,
the main controller loop, and set of workers that perform the actual fetching.
The user-facing controller API is the object returned when you call `(httprc.Client).Start`.
```go
ctrl, _ := client.Start(ctx)
```
# Controller API
The controller API gives you access to the controller backend that runs asynchronously.
All methods take a `context.Context` object because they potentially block. You should
be careful to use `context.WithTimeout` to properly set a timeout if you cannot tolerate
a blocking operation.
# Main Controller Loop
The main controller loop is run asynchronously to the controller API. It is single threaded,
and it has two responsibilities.
The first is to receive commands from the controller API,
and appropriately modify the state of the goroutine, i.e. modify the list of resources
it is watching, performing forced refreshes, etc.
The other is to periodically wake up and go through the list of resources and re-fetch
ones that are past their TTL (in reality, each resource carry a "next-check" time, not
a TTL). The main controller loop itself does nothing more: it just kicks these checks periodically.
The interval between fetches is changed dynamically based on either the metadata carried
with the HTTP responses, such as `Cache-Control` and `Expires` headers, or a constant
interval set by the user for a given resource. Between these values, the main controller loop
will pick the shortest interval (but no less than 1 second) and checks if resources
need updating based on that value.
For example, if a resource A has an expiry of 10 minutes and resource B has an expiry of 5
minutes, the main controller loop will attempt to wake up roughly every 5 minutes to check
on the resources.
When the controller loop detects that a resource needs to be checked for freshness,
it will send the resource to the worker pool to be synced.
# Interval calculation
After the resource is synced, the next fetch is scheduled. The interval to the next
fetch is calculated either by using constant intervals, or by heuristics using values
from the `http.Response` object.
If the constant interval is specified, no extra calculation is performed. If you specify
a constant interval of 15 minutes, the resource will be checked every 15 minutes. This is
predictable and reliable, but not necessarily efficient.
If you do not specify a constant interval, the HTTP response is analyzed for
values in `Cache-Control` and `Expires` headers. These values will be compared against
a maximum and minimum interval values, which default to 30 days and 15 minutes, respectively.
If the values obtained from the headers fall within that range, the value from the header is
used. If the value is larger than the maximum, the maximum is used. If the value is lower
than the minimum, the minimum is used.
# SYNOPSIS
<!-- INCLUDE(client_example_test.go) -->
```go
package httprc_test
import (
"context"
"encoding/json"
"fmt"
"net/http"
"net/http/httptest"
"time"
"github.com/lestrrat-go/httprc/v3"
)
func ExampleClient() {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
type HelloWorld struct {
Hello string `json:"hello"`
}
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
json.NewEncoder(w).Encode(map[string]string{"hello": "world"})
}))
options := []httprc.NewClientOption{
// By default the client will allow all URLs (which is what the option
// below is explicitly specifying). If you want to restrict what URLs
// are allowed, you can specify another whitelist.
//
// httprc.WithWhitelist(httprc.NewInsecureWhitelist()),
}
// If you would like to handle errors from asynchronous workers, you can specify a error sink.
// This is disabled in this example because the trace logs are dynamic
// and thus would interfere with the runnable example test.
// options = append(options, httprc.WithErrorSink(errsink.NewSlog(slog.New(slog.NewJSONHandler(os.Stdout, nil)))))
// If you would like to see the trace logs, you can specify a trace sink.
// This is disabled in this example because the trace logs are dynamic
// and thus would interfere with the runnable example test.
// options = append(options, httprc.WithTraceSink(tracesink.NewSlog(slog.New(slog.NewJSONHandler(os.Stdout, nil)))))
// Create a new client
cl := httprc.NewClient(options...)
// Start the client, and obtain a Controller object
ctrl, err := cl.Start(ctx)
if err != nil {
fmt.Println(err.Error())
return
}
// The following is required if you want to make sure that there are no
// dangling goroutines hanging around when you exit. For example, if you
// are running tests to check for goroutine leaks, you should call this
// function before the end of your test.
defer ctrl.Shutdown(time.Second)
// Create a new resource that is synchronized every so often
//
// By default the client will attempt to fetch the resource once
// as soon as it can, and then if no other metadata is provided,
// it will fetch the resource every 15 minutes.
//
// If the resource responds with a Cache-Control/Expires header,
// the client will attempt to respect that, and will try to fetch
the resource again based on the values obtained from the headers.
r, err := httprc.NewResource[HelloWorld](srv.URL, httprc.JSONTransformer[HelloWorld]())
if err != nil {
fmt.Println(err.Error())
return
}
// Add the resource to the controller, so that it starts fetching.
// By default, a call to `Add()` will block until the first fetch
// succeeds, via an implicit call to `r.Ready()`
// You can change this behavior if you specify the `WithWaitReady(false)`
// option.
ctrl.Add(ctx, r)
// if you specified `httprc.WithWaitReady(false)` option, the fetch will happen
// "soon", but you're not guaranteed that it will happen before the next
// call to `Lookup()`. If you want to make sure that the resource is ready,
// you can call `Ready()` like so:
/*
{
tctx, tcancel := context.WithTimeout(ctx, time.Second)
defer tcancel()
if err := r.Ready(tctx); err != nil {
fmt.Println(err.Error())
return
}
}
*/
m := r.Resource()
fmt.Println(m.Hello)
// OUTPUT:
// world
}
```
source: [client_example_test.go](https://github.com/lestrrat-go/httprc/blob/refs/heads/v3/client_example_test.go)
<!-- END INCLUDE -->

235
vendor/github.com/lestrrat-go/httprc/v3/backend.go generated vendored Normal file
View File

@@ -0,0 +1,235 @@
package httprc
import (
"context"
"fmt"
"sync"
"time"
)
// adjustInterval recomputes the controller's tick interval from the time
// remaining until req.resource next needs checking, clamped to at least one
// second. The ticker is only reset when the candidate interval does not
// exceed the current one.
func (c *ctrlBackend) adjustInterval(ctx context.Context, req adjustIntervalRequest) {
	candidate := roundupToSeconds(time.Until(req.resource.Next()))
	c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: got adjust request (current tick interval=%s, next for %q=%s)", c.tickInterval, req.resource.URL(), candidate))
	if candidate < time.Second {
		candidate = time.Second
	}
	if candidate > c.tickInterval {
		c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: no adjusting required (time to next check %s > current tick interval %s)", candidate, c.tickInterval))
		return
	}
	c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: adjusting tick interval to %s", candidate))
	c.tickInterval = candidate
	c.check.Reset(candidate)
}
// addResource registers req.resource under its URL. If a resource with the
// same URL is already registered it replies with errResourceAlreadyExists.
// Resources with no max/min interval of their own inherit the controller-wide
// defaults, and the tick interval is shrunk to one nanosecond so the next
// periodic check fires (almost) immediately and fetches the new resource.
func (c *ctrlBackend) addResource(ctx context.Context, req addRequest) {
	r := req.resource
	if _, ok := c.items[r.URL()]; ok {
		// Already exists
		sendReply(ctx, req.reply, struct{}{}, errResourceAlreadyExists)
		return
	}
	c.items[r.URL()] = r
	if r.MaxInterval() == 0 {
		r.SetMaxInterval(c.defaultMaxInterval)
	}
	if r.MinInterval() == 0 {
		c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: set minimum interval to %s", c.defaultMinInterval))
		r.SetMinInterval(c.defaultMinInterval)
	}
	c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: added resource %q", r.URL()))
	sendReply(ctx, req.reply, struct{}{}, nil)
	// Force a near-immediate tick so the new resource is fetched right away.
	c.SetTickInterval(time.Nanosecond)
}
// rmResource removes the resource registered under req.u, replying with
// errResourceNotFound when it is unknown. After removal the ticker is reset
// to the smallest MinInterval among the remaining resources (falling back to
// oneDay when none remain).
func (c *ctrlBackend) rmResource(ctx context.Context, req rmRequest) {
	u := req.u
	if _, ok := c.items[u]; !ok {
		sendReply(ctx, req.reply, struct{}{}, errResourceNotFound)
		return
	}
	delete(c.items, u)
	minInterval := oneDay
	for _, item := range c.items {
		if d := item.MinInterval(); d < minInterval {
			minInterval = d
		}
	}
	// Closing the reply channel signals success: the receiver observes the
	// zero-value backendResponse, i.e. a nil error.
	close(req.reply)
	c.check.Reset(minInterval)
}
// refreshResource forces an immediate fetch of the resource registered under
// req.u by zeroing its next-fetch time and submitting it to the worker pool
// as a synchronous request (the worker replies on req.reply when done).
// Replies with errResourceNotFound when req.u is not registered.
func (c *ctrlBackend) refreshResource(ctx context.Context, req refreshRequest) {
	c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: [refresh] START %q", req.u))
	defer c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: [refresh] END %q", req.u))
	u := req.u
	r, ok := c.items[u]
	if !ok {
		c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: [refresh] %s is not registered", req.u))
		sendReply(ctx, req.reply, struct{}{}, errResourceNotFound)
		return
	}
	// Note: We don't wait for r.Ready() here because refresh should work
	// regardless of whether the resource has been fetched before. This allows
	// refresh to work with resources registered using WithWaitReady(false).
	r.SetNext(time.Unix(0, 0))
	sendWorkerSynchronous(ctx, c.syncoutgoing, synchronousRequest{
		resource: r,
		reply:    req.reply,
	})
	c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: [refresh] sync request for %s sent to worker pool", req.u))
}
// lookupResource replies with the resource registered under req.u, or with
// errResourceNotFound when no resource with that URL exists.
func (c *ctrlBackend) lookupResource(ctx context.Context, req lookupRequest) {
	if r, ok := c.items[req.u]; ok {
		sendReply(ctx, req.reply, r, nil)
		return
	}
	sendReply(ctx, req.reply, nil, errResourceNotFound)
}
// handleRequest dispatches one request received on the incoming channel to
// the matching handler based on its concrete type. An unknown request type
// is not an error; it is only reported to the trace sink.
func (c *ctrlBackend) handleRequest(ctx context.Context, req any) {
	switch req := req.(type) {
	case adjustIntervalRequest:
		c.adjustInterval(ctx, req)
	case addRequest:
		c.addResource(ctx, req)
	case rmRequest:
		c.rmResource(ctx, req)
	case refreshRequest:
		c.refreshResource(ctx, req)
	case lookupRequest:
		c.lookupResource(ctx, req)
	default:
		c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: unknown request type %T", req))
	}
}
// sendWorker marks r busy and hands it to the worker pool via ch. If ctx is
// canceled before the send completes, the send is abandoned (note that the
// busy flag has already been set at that point and is not cleared here).
func sendWorker(ctx context.Context, ch chan Resource, r Resource) {
	r.SetBusy(true)
	select {
	case <-ctx.Done():
	case ch <- r:
	}
}

// sendWorkerSynchronous is like sendWorker but submits a synchronousRequest,
// whose embedded reply channel lets the caller wait for the fetch to finish.
func sendWorkerSynchronous(ctx context.Context, ch chan synchronousRequest, r synchronousRequest) {
	r.resource.SetBusy(true)
	select {
	case <-ctx.Done():
	case ch <- r:
	}
}

// sendReply delivers a single backendResponse on ch and always closes the
// channel afterwards, so receivers never wait for a second value. If ctx is
// canceled first, the channel is closed without a payload and the receiver
// observes the zero-value response.
func sendReply[T any](ctx context.Context, ch chan backendResponse[T], v T, err error) {
	defer close(ch)
	select {
	case <-ctx.Done():
	case ch <- backendResponse[T]{payload: v, err: err}:
	}
}
// ctrlBackend is the single-goroutine backend behind the public Controller.
// Its fields are accessed from the loop goroutine; other goroutines talk to
// it exclusively through the incoming channel.
type ctrlBackend struct {
	items              map[string]Resource     // registered resources, keyed by URL
	outgoing           chan Resource           // fire-and-forget fetch dispatches to the worker pool
	syncoutgoing       chan synchronousRequest // synchronous fetch dispatches (refresh)
	incoming           chan any                // incoming requests to the controller
	traceSink          TraceSink               // sink for trace/debug messages
	tickInterval       time.Duration           // current period of the check ticker
	check              *time.Ticker            // drives periodicCheck
	defaultMaxInterval time.Duration           // applied to added resources lacking a max interval
	defaultMinInterval time.Duration           // applied to added resources lacking a min interval
}
// loop is the controller backend's main event loop, run on its own goroutine.
// It signals readiness via readywg, then serves requests from the incoming
// channel and periodic ticks from the check ticker until ctx is canceled.
// donewg is decremented on exit so shutdown can wait for the loop to stop.
func (c *ctrlBackend) loop(ctx context.Context, readywg, donewg *sync.WaitGroup) {
	c.traceSink.Put(ctx, "httprc controller: starting main controller loop")
	readywg.Done()
	defer c.traceSink.Put(ctx, "httprc controller: stopping main controller loop")
	defer donewg.Done()
	for {
		c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: waiting for request or tick (tick interval=%s)", c.tickInterval))
		select {
		case req := <-c.incoming:
			c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: got request %T", req))
			c.handleRequest(ctx, req)
		case t := <-c.check.C:
			c.periodicCheck(ctx, t)
		case <-ctx.Done():
			return
		}
	}
}
// periodicCheck runs once per ticker tick (t is the tick time). It scans all
// registered resources, dispatches every resource that is not busy and whose
// next-fetch time has passed to the worker pool, and then recomputes the tick
// interval so the ticker fires shortly after the earliest upcoming fetch.
//
// Fix: the trace messages below previously spelled "interval" as "intervanl".
func (c *ctrlBackend) periodicCheck(ctx context.Context, t time.Time) {
	c.traceSink.Put(ctx, "httprc controller: START periodic check")
	defer c.traceSink.Put(ctx, "httprc controller: END periodic check")
	var minNext time.Time
	var dispatched int
	// minInterval tracks the smallest MinInterval across all resources;
	// a negative value marks "not yet set".
	minInterval := -1 * time.Second
	for _, item := range c.items {
		c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: checking resource %q", item.URL()))
		next := item.Next()
		if minNext.IsZero() || next.Before(minNext) {
			minNext = next
		}
		if interval := item.MinInterval(); minInterval < 0 || interval < minInterval {
			minInterval = interval
		}
		c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: resource %q isBusy=%t, next(%s).After(%s)=%t", item.URL(), item.IsBusy(), next, t, next.After(t)))
		if item.IsBusy() || next.After(t) {
			c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: resource %q is busy or not ready yet, skipping", item.URL()))
			continue
		}
		c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: resource %q is ready, dispatching to worker pool", item.URL()))
		dispatched++
		c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: dispatching resource %q to worker pool", item.URL()))
		sendWorker(ctx, c.outgoing, item)
	}
	c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: dispatched %d resources", dispatched))
	// Next check is always at the earliest next check + 1 second.
	// The extra second makes sure that we are _past_ the actual next check time
	// so we can send the resource to the worker pool
	if interval := time.Until(minNext); interval > 0 {
		c.SetTickInterval(roundupToSeconds(interval) + time.Second)
		c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: resetting check interval to %s", c.tickInterval))
	} else {
		// if we got here, either we have no resources, or all resources are busy.
		// In this state, it's possible that the interval is less than 1 second,
		// because we previously set it to a small value for an immediate refresh.
		// in this case, we want to reset it to a sane value
		if c.tickInterval < time.Second {
			c.SetTickInterval(minInterval)
			c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: resetting check interval to %s after forced refresh", c.tickInterval))
		}
	}
	c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: next check in %s", c.tickInterval))
}
// SetTickInterval updates the stored tick interval and resets the ticker so
// the next tick fires d from now. Non-positive values are clamped to one
// second (time.Ticker.Reset panics on d <= 0).
func (c *ctrlBackend) SetTickInterval(d time.Duration) {
	// TODO synchronize
	if d <= 0 {
		d = time.Second // ensure positive interval
	}
	c.tickInterval = d
	c.check.Reset(d)
}

183
vendor/github.com/lestrrat-go/httprc/v3/client.go generated vendored Normal file
View File

@@ -0,0 +1,183 @@
package httprc
import (
"context"
"net/http"
"sync"
"time"
"github.com/lestrrat-go/httprc/v3/errsink"
"github.com/lestrrat-go/httprc/v3/proxysink"
"github.com/lestrrat-go/httprc/v3/tracesink"
)
// setupSink creates and starts a proxy for the given sink if it's not a Nop sink.
// The proxy serializes Put calls onto a dedicated goroutine, which is registered
// with wg so callers can wait for it to exit.
// Returns the sink to use and a cancel function that should be chained with the
// original cancel; for Nop sinks the sink itself and a no-op cancel are returned.
func setupSink[T any, S proxysink.Backend[T], NopType any](ctx context.Context, sink S, wg *sync.WaitGroup) (S, context.CancelFunc) {
	if _, ok := any(sink).(NopType); ok {
		return sink, func() {}
	}
	proxy := proxysink.New[T](sink)
	wg.Add(1)
	go func(ctx context.Context, wg *sync.WaitGroup, proxy *proxysink.Proxy[T]) {
		defer wg.Done()
		proxy.Run(ctx)
	}(ctx, wg, proxy)
	// proxy can be converted to one of the sink subtypes
	s, ok := any(proxy).(S)
	if !ok {
		panic("type assertion failed: proxy cannot be converted to type S")
	}
	return s, proxy.Close
}
// Client is the main entry point for the httprc package. Configure it with
// NewClient, then call Start to obtain a Controller.
type Client struct {
	mu                 sync.Mutex // guards running
	httpcl             HTTPClient // HTTP client handed to the workers
	numWorkers         int        // number of worker goroutines started by Start
	running            bool       // set once Start has succeeded
	errSink            ErrorSink
	traceSink          TraceSink
	wl                 Whitelist // URL whitelist consulted on Controller.Add
	defaultMaxInterval time.Duration
	defaultMinInterval time.Duration
}
// NewClient creates a new `httprc.Client` object.
//
// By default ALL urls are allowed. This may not be suitable for you if
// are using this in a production environment. You are encouraged to specify
// a whitelist using the `WithWhitelist` option.
//
// Unless overridden via options, the client uses http.DefaultClient, no-op
// error/trace sinks, DefaultWorkers workers, and the package defaults for
// the minimum/maximum fetch intervals.
func NewClient(options ...NewClientOption) *Client {
	//nolint:staticcheck
	var errSink ErrorSink = errsink.NewNop()
	//nolint:staticcheck
	var traceSink TraceSink = tracesink.NewNop()
	var wl Whitelist = InsecureWhitelist{}
	var httpcl HTTPClient = http.DefaultClient
	defaultMinInterval := DefaultMinInterval
	defaultMaxInterval := DefaultMaxInterval
	numWorkers := DefaultWorkers
	//nolint:forcetypeassert
	for _, option := range options {
		switch option.Ident() {
		case identHTTPClient{}:
			httpcl = option.Value().(HTTPClient)
		case identWorkers{}:
			numWorkers = option.Value().(int)
		case identErrorSink{}:
			errSink = option.Value().(ErrorSink)
		case identTraceSink{}:
			traceSink = option.Value().(TraceSink)
		case identWhitelist{}:
			wl = option.Value().(Whitelist)
		}
	}
	// A non-positive worker count is clamped to a single worker.
	if numWorkers <= 0 {
		numWorkers = 1
	}
	return &Client{
		httpcl:             httpcl,
		numWorkers:         numWorkers,
		errSink:            errSink,
		traceSink:          traceSink,
		wl:                 wl,
		defaultMinInterval: defaultMinInterval,
		defaultMaxInterval: defaultMaxInterval,
	}
}
// Start sets the client into motion. It will start a number of worker goroutines,
// and return a Controller object that you can use to control the execution of
// the client.
//
// If you attempt to call Start more than once, it will return an error.
func (c *Client) Start(octx context.Context) (Controller, error) {
	c.mu.Lock()
	if c.running {
		c.mu.Unlock()
		return nil, errAlreadyRunning
	}
	c.running = true
	c.mu.Unlock()
	// DON'T CANCEL THIS IN THIS METHOD! It's the responsibility of the
	// controller to cancel this context.
	ctx, cancel := context.WithCancel(octx)
	var donewg sync.WaitGroup
	// start proxy goroutines that will accept sink requests
	// and forward them to the appropriate sink
	errSink, errCancel := setupSink[error, ErrorSink, errsink.Nop](ctx, c.errSink, &donewg)
	traceSink, traceCancel := setupSink[string, TraceSink, tracesink.Nop](ctx, c.traceSink, &donewg)
	// Chain the cancel functions so canceling the controller also stops
	// the sink proxies.
	ocancel := cancel
	cancel = func() {
		ocancel()
		errCancel()
		traceCancel()
	}
	// Buffer the channels (workers + 1) so normal traffic does not block.
	chbuf := c.numWorkers + 1
	incoming := make(chan any, chbuf)
	outgoing := make(chan Resource, chbuf)
	syncoutgoing := make(chan synchronousRequest, chbuf)
	var readywg sync.WaitGroup
	readywg.Add(c.numWorkers)
	donewg.Add(c.numWorkers)
	for range c.numWorkers {
		wrk := worker{
			incoming:  incoming,
			next:      outgoing,
			nextsync:  syncoutgoing,
			errSink:   errSink,
			traceSink: traceSink,
			httpcl:    c.httpcl,
		}
		go wrk.Run(ctx, &readywg, &donewg)
	}
	tickInterval := oneDay
	ctrl := &controller{
		cancel:    cancel,
		incoming:  incoming,
		shutdown:  make(chan struct{}),
		traceSink: traceSink,
		wl:        c.wl,
	}
	backend := &ctrlBackend{
		items:              make(map[string]Resource),
		outgoing:           outgoing,
		syncoutgoing:       syncoutgoing,
		incoming:           incoming,
		traceSink:          traceSink,
		tickInterval:       tickInterval,
		check:              time.NewTicker(tickInterval),
		defaultMinInterval: c.defaultMinInterval,
		defaultMaxInterval: c.defaultMaxInterval,
	}
	donewg.Add(1)
	readywg.Add(1)
	go backend.loop(ctx, &readywg, &donewg)
	// Close the controller's shutdown channel once every goroutine
	// (workers, backend loop, sink proxies) has finished.
	go func(wg *sync.WaitGroup, ch chan struct{}) {
		wg.Wait()
		close(ch)
	}(&donewg, ctrl.shutdown)
	// Block until all workers and the backend loop have signaled readiness.
	readywg.Wait()
	return ctrl, nil
}

186
vendor/github.com/lestrrat-go/httprc/v3/controller.go generated vendored Normal file
View File

@@ -0,0 +1,186 @@
package httprc
import (
"context"
"fmt"
"time"
)
// Controller is the handle returned by Client.Start: it is how resources are
// added, looked up, removed, refreshed, and how the client is shut down.
type Controller interface {
	// Add adds a new `http.Resource` to the controller. If the resource already exists,
	// it will return an error.
	Add(context.Context, Resource, ...AddOption) error
	// Lookup a `httprc.Resource` by its URL. If the resource does not exist, it
	// will return an error.
	Lookup(context.Context, string) (Resource, error)
	// Remove a `httprc.Resource` from the controller by its URL. If the resource does
	// not exist, it will return an error.
	Remove(context.Context, string) error
	// Refresh forces a resource to be refreshed immediately. If the resource does
	// not exist, or if the refresh fails, it will return an error.
	Refresh(context.Context, string) error
	// ShutdownContext stops the client and waits for all goroutines to exit,
	// or for the context to be canceled, whichever comes first.
	ShutdownContext(context.Context) error
	// Shutdown is ShutdownContext with a timeout instead of a caller-supplied context.
	Shutdown(time.Duration) error
}

// controller is the concrete Controller implementation created by Client.Start.
type controller struct {
	cancel    context.CancelFunc // cancels the context driving all client goroutines
	incoming  chan any           // incoming requests to the controller
	shutdown  chan struct{}      // closed once all client goroutines have exited
	traceSink TraceSink
	wl        Whitelist // whitelist consulted by Add
}
// Shutdown is a convenience function that calls ShutdownContext with a
// context that has a timeout of `timeout`.
func (c *controller) Shutdown(timeout time.Duration) error {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	return c.ShutdownContext(ctx)
}
// ShutdownContext stops the client and all associated goroutines, and waits for them
// to finish. If the context is canceled, the function will return immediately:
// therefore you should not use the context you used to start the client (because
// presumably it's already canceled).
//
// Waiting for the client shutdown will also ensure that all sinks are properly
// flushed.
func (c *controller) ShutdownContext(ctx context.Context) error {
	c.cancel()
	select {
	case <-ctx.Done():
		return ctx.Err()
	case <-c.shutdown:
		return nil
	}
}
// ctrlRequest is the common shape of requests sent to the controller backend:
// a reply channel plus either a resource or a URL, depending on the kind.
type ctrlRequest[T any] struct {
	reply    chan T   // channel on which the backend sends exactly one reply
	resource Resource // set for add/synchronous requests
	u        string   // set for remove/refresh/lookup requests
}
type addRequest ctrlRequest[backendResponse[struct{}]]
type rmRequest ctrlRequest[backendResponse[struct{}]]
type refreshRequest ctrlRequest[backendResponse[struct{}]]
type lookupRequest ctrlRequest[backendResponse[Resource]]
type synchronousRequest ctrlRequest[backendResponse[struct{}]]

// adjustIntervalRequest asks the backend to reconsider its tick interval
// based on the given resource's next fetch time.
type adjustIntervalRequest struct {
	resource Resource
}

// backendResponse is the single reply to a backend request: a payload plus
// an error (mutually exclusive in practice).
type backendResponse[T any] struct {
	payload T
	err     error
}
// sendBackend submits request v to the backend over backendCh and waits for
// the single reply on replyCh. When ctx is canceled before the reply arrives
// it returns the zero value and ctx.Err(); cancellation during the first
// select also abandons the send itself.
func sendBackend[TReq any, TB any](ctx context.Context, backendCh chan any, v TReq, replyCh chan backendResponse[TB]) (TB, error) {
	select {
	case <-ctx.Done():
	case backendCh <- v:
	}
	select {
	case <-ctx.Done():
		var zero TB
		return zero, ctx.Err()
	case res := <-replyCh:
		return res.payload, res.err
	}
}
// Lookup returns the resource registered under the URL u. If no resource
// with that URL exists, an error is returned.
//
// Due to how type parameters work in Go, only the Resource interface (not
// the concrete ResourceBase[T]) can be returned here. Use `Resource.Get()`
// or a type assertion to `ResourceBase[T]` to reach the stored value.
func (c *controller) Lookup(ctx context.Context, u string) (Resource, error) {
	replyCh := make(chan backendResponse[Resource], 1)
	return sendBackend[lookupRequest, Resource](ctx, c.incoming, lookupRequest{u: u, reply: replyCh}, replyCh)
}
// Add adds a new resource to the controller. If the resource already
// exists, it will return an error.
//
// By default this function will automatically wait for the resource to be
// fetched once (by calling `r.Ready()`). Note that the `r.Ready()` call will NOT
// timeout unless you configure your context object with `context.WithTimeout`.
// To disable waiting, you can specify the `WithWaitReady(false)` option.
func (c *controller) Add(ctx context.Context, r Resource, options ...AddOption) error {
	c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: START Add(%q)", r.URL()))
	defer c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: END Add(%q)", r.URL()))
	waitReady := true
	//nolint:forcetypeassert
	for _, option := range options {
		switch option.Ident() {
		case identWaitReady{}:
			waitReady = option.(addOption).Value().(bool)
		}
	}
	// The whitelist is consulted before the request ever reaches the backend.
	if !c.wl.IsAllowed(r.URL()) {
		return fmt.Errorf(`httprc.Controller.AddResource: cannot add %q: %w`, r.URL(), errBlockedByWhitelist)
	}
	reply := make(chan backendResponse[struct{}], 1)
	req := addRequest{
		reply:    reply,
		resource: r,
	}
	c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: sending add request for %q to backend", r.URL()))
	if _, err := sendBackend[addRequest, struct{}](ctx, c.incoming, req, reply); err != nil {
		return err
	}
	if waitReady {
		c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: waiting for resource %q to be ready", r.URL()))
		if err := r.Ready(ctx); err != nil {
			return err
		}
	}
	return nil
}
// Remove deregisters the resource stored under the URL u. It returns an
// error if no resource with that URL is registered.
func (c *controller) Remove(ctx context.Context, u string) error {
	replyCh := make(chan backendResponse[struct{}], 1)
	_, err := sendBackend[rmRequest, struct{}](ctx, c.incoming, rmRequest{u: u, reply: replyCh}, replyCh)
	return err
}
// Refresh forces an immediate re-fetch of the resource stored under the URL
// u. It returns an error if the resource is not registered, or if the fetch
// itself fails. The call is synchronous: it blocks until the refresh is done.
func (c *controller) Refresh(ctx context.Context, u string) error {
	replyCh := make(chan backendResponse[struct{}], 1)
	_, err := sendBackend[refreshRequest, struct{}](ctx, c.incoming, refreshRequest{u: u, reply: replyCh}, replyCh)
	return err
}

57
vendor/github.com/lestrrat-go/httprc/v3/errors.go generated vendored Normal file
View File

@@ -0,0 +1,57 @@
package httprc
import "errors"
// errResourceAlreadyExists is the sentinel reported when a resource with the
// same URL has already been registered.
var errResourceAlreadyExists = errors.New(`resource already exists`)

// ErrResourceAlreadyExists returns the duplicate-registration sentinel.
// Compare against it with errors.Is.
func ErrResourceAlreadyExists() error {
	return errResourceAlreadyExists
}

// errAlreadyRunning is the sentinel reported when Client.Start is called twice.
var errAlreadyRunning = errors.New(`client is already running`)

// ErrAlreadyRunning returns the sentinel for a repeated Client.Start call.
func ErrAlreadyRunning() error {
	return errAlreadyRunning
}

// errResourceNotFound is the sentinel reported when an operation targets a
// URL that is not registered.
var errResourceNotFound = errors.New(`resource not found`)

// ErrResourceNotFound returns the unknown-resource sentinel.
func ErrResourceNotFound() error {
	return errResourceNotFound
}

// errTransformerRequired is the sentinel reported by NewResource when no
// transformer is supplied.
var errTransformerRequired = errors.New(`transformer is required`)

// ErrTransformerRequired returns the missing-transformer sentinel.
func ErrTransformerRequired() error {
	return errTransformerRequired
}

// errURLCannotBeEmpty is the sentinel reported by NewResource for an empty URL.
var errURLCannotBeEmpty = errors.New(`URL cannot be empty`)

// ErrURLCannotBeEmpty returns the empty-URL sentinel.
func ErrURLCannotBeEmpty() error {
	return errURLCannotBeEmpty
}

// errUnexpectedStatusCode is the sentinel for an unexpected HTTP status code.
var errUnexpectedStatusCode = errors.New(`unexpected status code`)

// ErrUnexpectedStatusCode returns the unexpected-status-code sentinel.
func ErrUnexpectedStatusCode() error {
	return errUnexpectedStatusCode
}

// errTransformerFailed is the sentinel for a transformer that could not
// process a response body.
var errTransformerFailed = errors.New(`failed to transform response body`)

// ErrTransformerFailed returns the transformer-failure sentinel.
func ErrTransformerFailed() error {
	return errTransformerFailed
}

// errRecoveredFromPanic is the sentinel indicating a panic was recovered and
// converted into an error.
var errRecoveredFromPanic = errors.New(`recovered from panic`)

// ErrRecoveredFromPanic returns the recovered-panic sentinel.
func ErrRecoveredFromPanic() error {
	return errRecoveredFromPanic
}

// errBlockedByWhitelist is the sentinel reported by Controller.Add when the
// whitelist rejects the resource's URL.
var errBlockedByWhitelist = errors.New(`blocked by whitelist`)

// ErrBlockedByWhitelist returns the whitelist-rejection sentinel.
func ErrBlockedByWhitelist() error {
	return errBlockedByWhitelist
}

View File

@@ -0,0 +1,59 @@
package errsink
import (
"context"
"log/slog"
)
// Interface is the error-sink contract: implementations receive errors via Put.
type Interface interface {
	Put(context.Context, error)
}

// Nop is an ErrorSink that does nothing. It does not require
// any initialization, so the zero value can be used.
type Nop struct{}

// NewNop returns a new NopErrorSink object. The constructor
// is provided for consistency.
func NewNop() Interface {
	return Nop{}
}

// Put for NopErrorSink does nothing.
func (Nop) Put(context.Context, error) {}

// SlogLogger is the subset of *slog.Logger required by the slog-backed sink.
type SlogLogger interface {
	Log(context.Context, slog.Level, string, ...any)
}

// slogSink adapts a SlogLogger into an error sink.
type slogSink struct {
	logger SlogLogger
}

// NewSlog returns a new ErrorSink that logs errors using the provided slog.Logger
func NewSlog(l SlogLogger) Interface {
	return &slogSink{
		logger: l,
	}
}

// Put logs the error's message at slog.LevelError.
func (s *slogSink) Put(ctx context.Context, v error) {
	s.logger.Log(ctx, slog.LevelError, v.Error())
}

// FuncSink is an ErrorSink that calls a function with the error.
type FuncSink struct {
	fn func(context.Context, error)
}

// NewFunc returns a new FuncSink that calls the provided function with errors.
func NewFunc(fn func(context.Context, error)) Interface {
	return &FuncSink{fn: fn}
}

// Put calls the function with the error. A nil function is silently ignored.
func (f *FuncSink) Put(ctx context.Context, err error) {
	if f.fn != nil {
		f.fn(ctx, err)
	}
}

90
vendor/github.com/lestrrat-go/httprc/v3/httprc.go generated vendored Normal file
View File

@@ -0,0 +1,90 @@
package httprc
import (
"context"
"net/http"
"time"
"github.com/lestrrat-go/httprc/v3/errsink"
"github.com/lestrrat-go/httprc/v3/tracesink"
)
// Buffer size constants
const (
	// ReadBufferSize is the default buffer size for reading HTTP responses (10MB)
	ReadBufferSize = 1024 * 1024 * 10
	// MaxBufferSize is the maximum allowed buffer size (1GB)
	MaxBufferSize = 1024 * 1024 * 1000
)

// Client worker constants
const (
	// DefaultWorkers is the default number of worker goroutines
	DefaultWorkers = 5
)

// Interval constants
const (
	// DefaultMaxInterval is the default maximum interval between fetches (30 days)
	DefaultMaxInterval = 24 * time.Hour * 30
	// DefaultMinInterval is the default minimum interval between fetches (15 minutes)
	DefaultMinInterval = 15 * time.Minute
	// oneDay is used internally for time calculations: it is the initial
	// tick interval in Client.Start and the fallback in rmResource.
	oneDay = 24 * time.Hour
)
// roundupToSeconds rounds d up to the next whole second. Values that are
// already an exact multiple of a second (including zero, and negative values,
// whose remainder is non-positive) are returned unchanged.
func roundupToSeconds(d time.Duration) time.Duration {
	rem := d % time.Second
	if rem <= 0 {
		return d
	}
	return d - rem + time.Second
}
// ErrorSink is an interface that abstracts a sink for errors.
type ErrorSink = errsink.Interface

// TraceSink is an interface that abstracts a sink for trace messages.
type TraceSink = tracesink.Interface

// HTTPClient is an interface that abstracts a "net/http".Client, so that
// users can provide their own implementation of the HTTP client, if need be.
type HTTPClient interface {
	Do(*http.Request) (*http.Response, error)
}

// Transformer is used to convert the body of an HTTP response into an appropriate
// object of type T.
type Transformer[T any] interface {
	Transform(context.Context, *http.Response) (T, error)
}

// TransformFunc is a function type that implements the Transformer interface.
type TransformFunc[T any] func(context.Context, *http.Response) (T, error)

// Transform implements Transformer by calling the function itself.
func (f TransformFunc[T]) Transform(ctx context.Context, res *http.Response) (T, error) {
	return f(ctx, res)
}

// Resource is a single resource that can be retrieved via HTTP, and (possibly) transformed
// into an arbitrary object type.
//
// Realistically, there is no need for third-parties to implement this interface. This exists
// to provide a way to aggregate `httprc.ResourceBase` objects with different specialized types
// into a single collection.
//
// See ResourceBase for details
type Resource interface { //nolint:interfacebloat
	Get(any) error
	Next() time.Time
	SetNext(time.Time)
	URL() string
	Sync(context.Context) error
	ConstantInterval() time.Duration
	MaxInterval() time.Duration
	SetMaxInterval(time.Duration)
	MinInterval() time.Duration
	SetMinInterval(time.Duration)
	IsBusy() bool
	SetBusy(bool)
	Ready(context.Context) error
}

144
vendor/github.com/lestrrat-go/httprc/v3/options.go generated vendored Normal file
View File

@@ -0,0 +1,144 @@
package httprc
import (
"time"
"github.com/lestrrat-go/option"
)
// NewClientOption is an option that can be passed to NewClient.
type NewClientOption interface {
	option.Interface
	newClientOption()
}

// newClientOption tags an option as valid for NewClient only.
type newClientOption struct {
	option.Interface
}

func (newClientOption) newClientOption() {}

// identWorkers identifies the WithWorkers option.
type identWorkers struct{}

// WithWorkers specifies the number of concurrent workers to use for the client.
// If n is less than or equal to 0, the client will use a single worker.
func WithWorkers(n int) NewClientOption {
	return newClientOption{option.New(identWorkers{}, n)}
}

// identErrorSink identifies the WithErrorSink option.
type identErrorSink struct{}

// WithErrorSink specifies the error sink to use for the client.
// If not specified, the client will use a NopErrorSink.
func WithErrorSink(sink ErrorSink) NewClientOption {
	return newClientOption{option.New(identErrorSink{}, sink)}
}

// identTraceSink identifies the WithTraceSink option.
type identTraceSink struct{}

// WithTraceSink specifies the trace sink to use for the client.
// If not specified, the client will use a NopTraceSink.
func WithTraceSink(sink TraceSink) NewClientOption {
	return newClientOption{option.New(identTraceSink{}, sink)}
}

// identWhitelist identifies the WithWhitelist option.
type identWhitelist struct{}

// WithWhitelist specifies the whitelist to use for the client.
// If not specified, the client will use an InsecureWhitelist, which
// allows all URLs (see NewClient).
func WithWhitelist(wl Whitelist) NewClientOption {
	return newClientOption{option.New(identWhitelist{}, wl)}
}

// NewResourceOption is an option that can be passed to NewResource.
type NewResourceOption interface {
	option.Interface
	newResourceOption()
}

// newResourceOption tags an option as valid for NewResource only.
type newResourceOption struct {
	option.Interface
}

func (newResourceOption) newResourceOption() {}

// NewClientResourceOption is an option accepted by both NewClient and NewResource.
type NewClientResourceOption interface {
	option.Interface
	newResourceOption()
	newClientOption()
}

// newClientResourceOption tags an option as valid for both NewClient and NewResource.
type newClientResourceOption struct {
	option.Interface
}

func (newClientResourceOption) newResourceOption() {}
func (newClientResourceOption) newClientOption()  {}
// identHTTPClient identifies the WithHTTPClient option.
type identHTTPClient struct{}

// WithHTTPClient specifies the HTTP client to use for the client.
// If not specified, the client will use http.DefaultClient.
//
// This option can be passed to NewClient or NewResource.
func WithHTTPClient(cl HTTPClient) NewClientResourceOption {
	return newClientResourceOption{option.New(identHTTPClient{}, cl)}
}

// identMinimumInterval identifies the WithMinInterval option.
type identMinimumInterval struct{}

// WithMinInterval specifies the minimum interval between fetches.
//
// This option affects the dynamic calculation of the interval between fetches.
// If the value calculated from the http.Response is less than this value,
// the client will use this value instead.
func WithMinInterval(d time.Duration) NewResourceOption {
	return newResourceOption{option.New(identMinimumInterval{}, d)}
}

// identMaximumInterval identifies the WithMaxInterval option.
type identMaximumInterval struct{}

// WithMaxInterval specifies the maximum interval between fetches.
//
// This option affects the dynamic calculation of the interval between fetches.
// If the value calculated from the http.Response is greater than this value,
// the client will use this value instead.
func WithMaxInterval(d time.Duration) NewResourceOption {
	return newResourceOption{option.New(identMaximumInterval{}, d)}
}

// identConstantInterval identifies the WithConstantInterval option.
type identConstantInterval struct{}

// WithConstantInterval specifies the interval between fetches. When you
// specify this option, the client will fetch the resource at the specified
// intervals, regardless of the response's Cache-Control or Expires headers.
//
// By default this option is disabled.
func WithConstantInterval(d time.Duration) NewResourceOption {
	return newResourceOption{option.New(identConstantInterval{}, d)}
}

// AddOption is an option that can be passed to Controller.Add.
type AddOption interface {
	option.Interface
	newAddOption()
}

// addOption tags an option as valid for Controller.Add only.
type addOption struct {
	option.Interface
}

func (addOption) newAddOption() {}

// identWaitReady identifies the WithWaitReady option.
type identWaitReady struct{}

// WithWaitReady specifies whether the client should wait for the resource to be
// ready before returning from the Add method.
//
// By default, the client will wait for the resource to be ready before returning.
// If you specify this option with a value of false, the client will not wait for
// the resource to be fully registered, which is usually not what you want.
// This option exists to accommodate cases where you for some reason want to
// add a resource to the controller, but want to do something else before
// you wait for it. Make sure to call `r.Ready()` later on to ensure that
// the resource is ready before you try to access it.
func WithWaitReady(b bool) AddOption {
	return addOption{option.New(identWaitReady{}, b)}
}

View File

@@ -0,0 +1,135 @@
package proxysink
import (
"context"
"sync"
)
// Backend is any sink that can accept values of type T via Put.
type Backend[T any] interface {
	Put(context.Context, T)
}

// Proxy is used to send values through a channel. This is used to
// serialize calls to underlying sinks.
type Proxy[T any] struct {
	mu      *sync.Mutex
	cancel  context.CancelFunc // installed by Run; stops the loops (Close calls it)
	ch      chan T             // hands values from Put to controlloop
	cond    *sync.Cond         // wakes flushloop when pending values arrive; shares mu
	pending []T                // values buffered for the next flush; guarded by mu
	backend Backend[T]         // the sink that ultimately receives the values
	closed  bool               // set by Close; guarded by mu
}
// New creates a Proxy wrapping backend b. The mutex and the condition
// variable share the same lock; cancel starts as a no-op until Run installs
// the real cancel function.
func New[T any](b Backend[T]) *Proxy[T] {
	mu := &sync.Mutex{}
	return &Proxy[T]{
		ch:      make(chan T, 1),
		mu:      mu,
		cond:    sync.NewCond(mu),
		backend: b,
		cancel:  func() {},
	}
}
// Run installs a cancelable child context (so Close can stop the proxy),
// starts the controlloop and flushloop goroutines, and blocks until the
// context is canceled. Waiters on the condition variable are woken on exit.
func (p *Proxy[T]) Run(ctx context.Context) {
	defer p.cond.Broadcast()
	p.mu.Lock()
	ctx, cancel := context.WithCancel(ctx)
	p.cancel = cancel
	p.mu.Unlock()
	go p.controlloop(ctx)
	go p.flushloop(ctx)
	<-ctx.Done()
}
// controlloop moves values from the Put channel into the pending slice,
// broadcasting on the condition variable after each append so flushloop
// wakes up. It exits when ctx is canceled.
func (p *Proxy[T]) controlloop(ctx context.Context) {
	defer p.cond.Broadcast()
	for {
		select {
		case <-ctx.Done():
			return
		case r := <-p.ch:
			p.mu.Lock()
			p.pending = append(p.pending, r)
			p.mu.Unlock()
		}
		p.cond.Broadcast()
	}
}
// flushloop drains the pending slice and forwards each value to the backend
// serially, preserving order. It sleeps on the condition variable while
// there is nothing to flush, and exits once ctx is canceled and the pending
// queue has been fully drained.
func (p *Proxy[T]) flushloop(ctx context.Context) {
	const defaultPendingSize = 10
	pending := make([]T, defaultPendingSize)
	for {
		select {
		case <-ctx.Done():
			// Only stop once everything buffered has been flushed.
			// The mutex MUST be released before falling through: the
			// previous version kept holding it here and self-deadlocked
			// on the re-lock below whenever values were still pending
			// at shutdown.
			p.mu.Lock()
			drained := len(p.pending) == 0
			p.mu.Unlock()
			if drained {
				return
			}
		default:
		}
		p.mu.Lock()
		for len(p.pending) <= 0 {
			select {
			case <-ctx.Done():
				p.mu.Unlock()
				return
			default:
				p.cond.Wait()
			}
		}
		// extract all pending values, and clear the shared slice
		if cap(pending) < len(p.pending) {
			pending = make([]T, len(p.pending))
		} else {
			pending = pending[:len(p.pending)]
		}
		copy(pending, p.pending)
		if cap(p.pending) > defaultPendingSize {
			p.pending = make([]T, 0, defaultPendingSize)
		} else {
			p.pending = p.pending[:0]
		}
		p.mu.Unlock()
		for _, v := range pending {
			// send to sink serially
			p.backend.Put(ctx, v)
		}
	}
}
// Put forwards v to the proxy's internal channel, to be flushed to the
// backend asynchronously. It returns without sending when the proxy has
// already been closed, or when ctx is canceled before the channel accepts
// the value. Note the closed check and the send are separate critical
// sections, so a concurrent Close can interleave between them.
func (p *Proxy[T]) Put(ctx context.Context, v T) {
	p.mu.Lock()
	if p.closed {
		p.mu.Unlock()
		return
	}
	p.mu.Unlock()
	select {
	case <-ctx.Done():
		return
	case p.ch <- v:
		return
	}
}
// Close marks the proxy as closed so Put rejects further values, cancels the
// context driving Run and its loops, and wakes any goroutine blocked on the
// condition variable. Calling Close more than once is harmless.
func (p *Proxy[T]) Close() {
	p.mu.Lock()
	p.closed = true
	p.cancel()
	p.cond.Broadcast()
	p.mu.Unlock()
}

359
vendor/github.com/lestrrat-go/httprc/v3/resource.go generated vendored Normal file
View File

@@ -0,0 +1,359 @@
package httprc
import (
"context"
"fmt"
"io"
"net/http"
"net/url"
"sync"
"sync/atomic"
"time"
"github.com/lestrrat-go/blackmagic"
"github.com/lestrrat-go/httpcc"
"github.com/lestrrat-go/httprc/v3/tracesink"
)
// ResourceBase is a generic Resource type
type ResourceBase[T any] struct {
	u           string        // the resource URL
	ready       chan struct{} // closed when the resource is ready (i.e. after first successful fetch)
	once        sync.Once
	httpcl      HTTPClient     // optional per-resource HTTP client override; may be nil
	t           Transformer[T] // converts HTTP responses into values of type T
	r           atomic.Value   // last transformed value (holds a T; see Resource())
	next        atomic.Value   // time.Time of the next scheduled fetch
	interval    time.Duration  // constant fetch interval (see ConstantInterval); 0 when unset
	minInterval atomic.Int64   // lower interval bound, stored as nanoseconds
	maxInterval atomic.Int64   // upper interval bound, stored as nanoseconds
	busy        atomic.Bool    // busy flag toggled via SetBusy/IsBusy
}
// NewResource creates a new Resource object which after fetching the
// resource from the URL, will transform the response body using the
// provided Transformer to an object of type T.
//
// This function will return an error if the URL is not a valid URL
// (i.e. it cannot be parsed by url.Parse), or if the transformer is nil.
//
// Fix: a redundant post-construction `if httpcl != nil { r.httpcl = httpcl }`
// was removed; the composite literal below already assigns the same value.
func NewResource[T any](s string, transformer Transformer[T], options ...NewResourceOption) (*ResourceBase[T], error) {
	var httpcl HTTPClient
	var interval time.Duration
	minInterval := DefaultMinInterval
	maxInterval := DefaultMaxInterval
	//nolint:forcetypeassert
	for _, option := range options {
		switch option.Ident() {
		case identHTTPClient{}:
			httpcl = option.Value().(HTTPClient)
		case identMinimumInterval{}:
			minInterval = option.Value().(time.Duration)
		case identMaximumInterval{}:
			maxInterval = option.Value().(time.Duration)
		case identConstantInterval{}:
			interval = option.Value().(time.Duration)
		}
	}
	if transformer == nil {
		return nil, fmt.Errorf(`httprc.NewResource: %w`, errTransformerRequired)
	}
	if s == "" {
		return nil, fmt.Errorf(`httprc.NewResource: %w`, errURLCannotBeEmpty)
	}
	if _, err := url.Parse(s); err != nil {
		return nil, fmt.Errorf(`httprc.NewResource: %w`, err)
	}
	r := &ResourceBase[T]{
		u:        s,
		httpcl:   httpcl, // may be nil when WithHTTPClient was not given
		t:        transformer,
		interval: interval,
		ready:    make(chan struct{}),
	}
	r.minInterval.Store(int64(minInterval))
	r.maxInterval.Store(int64(maxInterval))
	r.SetNext(time.Unix(0, 0)) // initially, it should be fetched immediately
	return r, nil
}
// URL returns the URL of the resource.
func (r *ResourceBase[T]) URL() string {
	return r.u
}

// Ready returns an empty error when the resource is ready. If the context
// is canceled before the resource is ready, it will return the error from
// the context.
//
// The ready channel is closed after the first successful fetch, so once the
// resource has been fetched Ready returns immediately.
func (r *ResourceBase[T]) Ready(ctx context.Context) error {
	select {
	case <-ctx.Done():
		return ctx.Err()
	case <-r.ready:
		return nil
	}
}
// Get assigns the value of the resource to the provided pointer.
// If using the `httprc.ResourceBase[T]` type directly, you can use the `Resource()`
// method to get the resource directly.
//
// This method exists because parametric types cannot be assigned to a single object type
// that return different return values of the specialized type. i.e. for resources
// `ResourceBase[A]` and `ResourceBase[B]`, we cannot have a single interface that can
// be assigned to the same interface type `X` that expects a `Resource()` method that
// returns `A` or `B` depending on the type of the resource. When accessing the
// resource through the `httprc.Resource` interface, use this method to obtain the
// stored value.
func (r *ResourceBase[T]) Get(dst interface{}) error {
	return blackmagic.AssignIfCompatible(dst, r.Resource())
}
// Resource returns the most recently fetched value. Until the first
// successful fetch has completed this returns the zero value of type T;
// use the `Ready()` method if you would rather wait for the initial fetch.
func (r *ResourceBase[T]) Resource() T {
	if v, ok := r.r.Load().(T); ok {
		return v
	}
	var zero T
	return zero
}
// Next returns the time at which the resource is next scheduled to be fetched.
func (r *ResourceBase[T]) Next() time.Time {
	//nolint:forcetypeassert
	return r.next.Load().(time.Time)
}
// SetNext schedules the next fetch time for the resource.
func (r *ResourceBase[T]) SetNext(v time.Time) {
	r.next.Store(v)
}
// ConstantInterval returns the fixed refresh interval configured for this
// resource, or 0 when none was set (in which case response headers drive
// the refresh schedule; see calculateNextRefreshTime).
func (r *ResourceBase[T]) ConstantInterval() time.Duration {
	return r.interval
}
// MaxInterval returns the upper bound applied to header-derived refresh intervals.
func (r *ResourceBase[T]) MaxInterval() time.Duration {
	return time.Duration(r.maxInterval.Load())
}
// MinInterval returns the lower bound applied to header-derived refresh intervals.
func (r *ResourceBase[T]) MinInterval() time.Duration {
	return time.Duration(r.minInterval.Load())
}
// SetMaxInterval updates the upper bound for header-derived refresh intervals.
func (r *ResourceBase[T]) SetMaxInterval(v time.Duration) {
	r.maxInterval.Store(int64(v))
}
// SetMinInterval updates the lower bound for header-derived refresh intervals.
func (r *ResourceBase[T]) SetMinInterval(v time.Duration) {
	r.minInterval.Store(int64(v))
}
// SetBusy records whether a sync for this resource is currently in flight
// (the worker clears it after each sync; see worker.Run).
func (r *ResourceBase[T]) SetBusy(v bool) {
	r.busy.Store(v)
}
// IsBusy reports whether a sync for this resource is currently in flight.
func (r *ResourceBase[T]) IsBusy() bool {
	return r.busy.Load()
}
// limitedBody is a wrapper around an io.Reader that will only read up to
// MaxBufferSize bytes. This is provided to prevent the user from accidentally
// reading a huge response body into memory
type limitedBody struct {
	rdr io.Reader // the capped reader (an *io.LimitedReader; see Sync)
	close func() error // Close of the original response body
}
// Read delegates to the wrapped, size-limited reader.
func (l *limitedBody) Read(p []byte) (n int, err error) {
	return l.rdr.Read(p)
}
// Close closes the original (unwrapped) response body.
func (l *limitedBody) Close() error {
	return l.close()
}
// traceSinkKey is the private context key under which a TraceSink travels.
type traceSinkKey struct{}

// withTraceSink stores sink in the context so that it can later be
// recovered via traceSinkFromContext.
func withTraceSink(ctx context.Context, sink TraceSink) context.Context {
	return context.WithValue(ctx, traceSinkKey{}, sink)
}

// traceSinkFromContext returns the TraceSink previously stored with
// withTraceSink, or a no-op sink when none is present.
func traceSinkFromContext(ctx context.Context) TraceSink {
	sink, ok := ctx.Value(traceSinkKey{}).(TraceSink)
	if !ok {
		return tracesink.Nop{}
	}
	return sink
}
// httpClientKey is the private context key under which an HTTPClient travels.
type httpClientKey struct{}

// withHTTPClient stores cl in the context so that it can later be
// recovered via httpClientFromContext.
func withHTTPClient(ctx context.Context, cl HTTPClient) context.Context {
	return context.WithValue(ctx, httpClientKey{}, cl)
}

// httpClientFromContext returns the HTTPClient previously stored with
// withHTTPClient, falling back to http.DefaultClient when none is present.
func httpClientFromContext(ctx context.Context) HTTPClient {
	cl, ok := ctx.Value(httpClientKey{}).(HTTPClient)
	if !ok {
		return http.DefaultClient
	}
	return cl
}
// Sync performs a single fetch of the resource: it issues a GET request,
// reschedules the next refresh based on the response headers, transforms
// the (size-limited) body, and stores the result, marking the resource
// ready after the first success.
//
// Note the ordering: the next refresh time is computed and stored *before*
// the status code is checked, so even a failed fetch gets rescheduled
// rather than retried in a hot loop.
func (r *ResourceBase[T]) Sync(ctx context.Context) error {
	traceSink := traceSinkFromContext(ctx)
	// Prefer the client configured on the resource; fall back to the one
	// the worker placed in the context.
	httpcl := r.httpcl
	if httpcl == nil {
		httpcl = httpClientFromContext(ctx)
	}
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, r.u, nil)
	if err != nil {
		return fmt.Errorf(`httprc.Resource.Sync: failed to create request: %w`, err)
	}
	traceSink.Put(ctx, fmt.Sprintf("httprc.Resource.Sync: fetching %q", r.u))
	res, err := httpcl.Do(req)
	if err != nil {
		return fmt.Errorf(`httprc.Resource.Sync: failed to execute HTTP request: %w`, err)
	}
	defer res.Body.Close()
	next := r.calculateNextRefreshTime(ctx, res)
	traceSink.Put(ctx, fmt.Sprintf("httprc.Resource.Sync: next refresh time for %q is %v", r.u, next))
	r.SetNext(next)
	if res.StatusCode != http.StatusOK {
		return fmt.Errorf(`httprc.Resource.Sync: %w (status code=%d, url=%q)`, errUnexpectedStatusCode, res.StatusCode, r.u)
	}
	// replace the body of the response with a limited reader that
	// will only read up to MaxBufferSize bytes
	res.Body = &limitedBody{
		rdr:   &io.LimitedReader{R: res.Body, N: MaxBufferSize},
		close: res.Body.Close,
	}
	traceSink.Put(ctx, fmt.Sprintf("httprc.Resource.Sync: transforming %q", r.u))
	v, err := r.transform(ctx, res)
	if err != nil {
		return fmt.Errorf(`httprc.Resource.Sync: %w: %w`, errTransformerFailed, err)
	}
	traceSink.Put(ctx, fmt.Sprintf("httprc.Resource.Sync: storing new value for %q", r.u))
	r.r.Store(v)
	// First successful sync unblocks every Ready() waiter, exactly once.
	r.once.Do(func() { close(r.ready) })
	traceSink.Put(ctx, fmt.Sprintf("httprc.Resource.Sync: stored value for %q", r.u))
	return nil
}
// transform runs the configured Transformer against the HTTP response,
// converting any panic inside the transformer into an ordinary error so a
// misbehaving user-supplied Transformer cannot crash the caller. When a
// panic is recovered, ret is left as the zero value of T.
func (r *ResourceBase[T]) transform(ctx context.Context, res *http.Response) (ret T, gerr error) {
	// Protect the call to Transform with a defer/recover block, so that even
	// if the Transform method panics, we can recover from it and return an error
	defer func() {
		if recovered := recover(); recovered != nil {
			gerr = fmt.Errorf(`httprc.Resource.transform: %w: %v`, errRecoveredFromPanic, recovered)
		}
	}()
	return r.t.Transform(ctx, res)
}
// determineNextFetchInterval clamps fromHeader (an interval derived from a
// response header such as max-age or Expires) to the [minValue, maxValue]
// range, tracing which of the three values was chosen. name appears only in
// the trace messages.
func (r *ResourceBase[T]) determineNextFetchInterval(ctx context.Context, name string, fromHeader, minValue, maxValue time.Duration) time.Duration {
	sink := traceSinkFromContext(ctx)
	switch {
	case fromHeader > maxValue:
		sink.Put(ctx, fmt.Sprintf("httprc.Resource.Sync: %s %s > maximum interval, using maximum interval %s", r.URL(), name, maxValue))
		return maxValue
	case fromHeader < minValue:
		sink.Put(ctx, fmt.Sprintf("httprc.Resource.Sync: %s %s < minimum interval, using minimum interval %s", r.URL(), name, minValue))
		return minValue
	default:
		sink.Put(ctx, fmt.Sprintf("httprc.Resource.Sync: %s Using %s (%s)", r.URL(), name, fromHeader))
		return fromHeader
	}
}
// calculateNextRefreshTime decides when the resource should next be
// fetched, in decreasing order of precedence:
//  1. a constant interval explicitly configured on the resource,
//  2. the response's Cache-Control max-age directive,
//  3. the response's Expires header,
//  4. the resource's minimum interval, as a fallback.
// Header-derived intervals are clamped to [MinInterval, MaxInterval] by
// determineNextFetchInterval.
func (r *ResourceBase[T]) calculateNextRefreshTime(ctx context.Context, res *http.Response) time.Time {
	traceSink := traceSinkFromContext(ctx)
	now := time.Now()
	// If constant interval is set, use that regardless of what the
	// response headers say.
	if interval := r.ConstantInterval(); interval > 0 {
		traceSink.Put(ctx, fmt.Sprintf("httprc.Resource.Sync: %s Explicit interval set, using value %s", r.URL(), interval))
		return now.Add(interval)
	}
	if interval := r.extractCacheControlMaxAge(ctx, res); interval > 0 {
		return now.Add(interval)
	}
	if interval := r.extractExpiresInterval(ctx, res); interval > 0 {
		return now.Add(interval)
	}
	traceSink.Put(ctx, fmt.Sprintf("httprc.Resource.Sync: %s No cache-control/expires headers found, using minimum interval", r.URL()))
	return now.Add(r.MinInterval())
}
// extractCacheControlMaxAge derives a refresh interval from the response's
// Cache-Control max-age directive. It returns 0 (meaning "no information")
// when the header is absent, unparseable, or carries no max-age directive;
// otherwise the value is clamped to the resource's min/max interval bounds.
func (r *ResourceBase[T]) extractCacheControlMaxAge(ctx context.Context, res *http.Response) time.Duration {
	traceSink := traceSinkFromContext(ctx)
	v := res.Header.Get(`Cache-Control`)
	if v == "" {
		return 0
	}
	dir, err := httpcc.ParseResponse(v)
	if err != nil {
		// Malformed Cache-Control: treat as if the header were missing.
		return 0
	}
	maxAge, ok := dir.MaxAge()
	if !ok {
		return 0
	}
	traceSink.Put(ctx, fmt.Sprintf("httprc.Resource.Sync: %s Cache-Control=max-age directive set (%d)", r.URL(), maxAge))
	return r.determineNextFetchInterval(
		ctx,
		"max-age",
		time.Duration(maxAge)*time.Second,
		r.MinInterval(),
		r.MaxInterval(),
	)
}
// extractExpiresInterval derives a refresh interval from the response's
// Expires header. It returns 0 (meaning "no information") when the header
// is absent or unparseable; otherwise the time remaining until expiry is
// clamped to the resource's min/max interval bounds.
func (r *ResourceBase[T]) extractExpiresInterval(ctx context.Context, res *http.Response) time.Duration {
	sink := traceSinkFromContext(ctx)
	raw := res.Header.Get(`Expires`)
	if raw == "" {
		return 0
	}
	expires, err := http.ParseTime(raw)
	if err != nil {
		// Malformed Expires: treat as if the header were missing.
		return 0
	}
	sink.Put(ctx, fmt.Sprintf("httprc.Resource.Sync: %s Expires header set (%s)", r.URL(), expires))
	return r.determineNextFetchInterval(
		ctx,
		"expires",
		time.Until(expires),
		r.MinInterval(),
		r.MaxInterval(),
	)
}

View File

@@ -0,0 +1,52 @@
package tracesink
import (
"context"
"log/slog"
)
// Interface is implemented by sinks that accept trace messages.
type Interface interface {
	Put(context.Context, string)
}

// Nop is a trace sink that discards every message. It does not require
// any initialization, so the zero value can be used.
type Nop struct{}

// NewNop returns a new no-op trace sink. The constructor is provided for
// consistency; Nop{} works just as well.
func NewNop() Interface {
	return Nop{}
}

// Put for Nop discards the message.
func (Nop) Put(context.Context, string) {}

// slogSink forwards trace messages to a structured logger at a fixed level.
type slogSink struct {
	level  slog.Level
	logger SlogLogger
}

// SlogLogger is the subset of *slog.Logger that slogSink requires.
type SlogLogger interface {
	Log(context.Context, slog.Level, string, ...any)
}

// NewSlog returns a trace sink that logs every message to l at
// slog.LevelInfo.
func NewSlog(l SlogLogger) Interface {
	return &slogSink{
		level:  slog.LevelInfo,
		logger: l,
	}
}

// Put logs the message via the underlying logger.
func (s *slogSink) Put(ctx context.Context, msg string) {
	s.logger.Log(ctx, s.level, msg)
}

// Func adapts a plain function into a trace sink.
type Func func(context.Context, string)

// Put calls the wrapped function with the trace message.
func (f Func) Put(ctx context.Context, msg string) {
	f(ctx, msg)
}

37
vendor/github.com/lestrrat-go/httprc/v3/transformer.go generated vendored Normal file
View File

@@ -0,0 +1,37 @@
package httprc
import (
"context"
"encoding/json"
"io"
"net/http"
)
// bytesTransformer is the stateless implementation behind BytesTransformer.
type bytesTransformer struct{}
// BytesTransformer returns a Transformer that reads the entire response body
// as a byte slice. This is the default Transformer used by httprc.Client
func BytesTransformer() Transformer[[]byte] {
	return bytesTransformer{}
}
// Transform reads the whole response body into memory. When called from
// Sync, the body has already been wrapped in a MaxBufferSize-limited
// reader, so the read is bounded — TODO confirm for any other callers.
func (bytesTransformer) Transform(_ context.Context, res *http.Response) ([]byte, error) {
	return io.ReadAll(res.Body)
}
// jsonTransformer is the stateless implementation behind JSONTransformer.
type jsonTransformer[T any] struct{}

// JSONTransformer returns a Transformer that decodes the response body as
// JSON into the provided type T.
func JSONTransformer[T any]() Transformer[T] {
	return jsonTransformer[T]{}
}

// Transform decodes the response body as JSON into a fresh value of type T.
// On a decode error the zero value of T is returned alongside the error.
func (jsonTransformer[T]) Transform(_ context.Context, res *http.Response) (T, error) {
	var parsed T
	if err := json.NewDecoder(res.Body).Decode(&parsed); err != nil {
		var zero T
		return zero, err
	}
	return parsed, nil
}

113
vendor/github.com/lestrrat-go/httprc/v3/whitelist.go generated vendored Normal file
View File

@@ -0,0 +1,113 @@
package httprc
import (
"regexp"
"sync"
)
// Whitelist is an interface that allows you to determine if a given URL is allowed
// or not. Implementations of this interface can be used to restrict the URLs that
// the client can access.
//
// By default all URLs are allowed, but this may not be ideal in production environments
// for security reasons.
//
// This exists because you might use this module to store resources whose URLs are
// provided by users of your application, in which case you cannot necessarily trust
// that the URLs are safe.
//
// You will HAVE to provide some sort of whitelist in that situation.
type Whitelist interface {
	IsAllowed(string) bool
}
// WhitelistFunc is an adapter that lets an ordinary function be used as a
// Whitelist.
type WhitelistFunc func(string) bool

// IsAllowed reports the result of calling the wrapped function with u.
func (f WhitelistFunc) IsAllowed(u string) bool {
	return f(u)
}
// BlockAllWhitelist is a Whitelist implementation that blocks all URLs.
type BlockAllWhitelist struct{}

// NewBlockAllWhitelist creates a new BlockAllWhitelist instance. The zero
// value is equally usable; the constructor exists for consistency.
func NewBlockAllWhitelist() BlockAllWhitelist {
	return BlockAllWhitelist{}
}

// IsAllowed always reports false: every URL is rejected.
func (BlockAllWhitelist) IsAllowed(_ string) bool {
	return false
}
// InsecureWhitelist is a Whitelist implementation that allows all URLs. Be
// careful when using this in production code: make sure you do not blindly
// register URLs from untrusted sources.
type InsecureWhitelist struct{}

// NewInsecureWhitelist creates a new InsecureWhitelist instance. The zero
// value is equally usable; the constructor exists for consistency.
func NewInsecureWhitelist() InsecureWhitelist {
	return InsecureWhitelist{}
}

// IsAllowed always reports true: every URL is accepted.
func (InsecureWhitelist) IsAllowed(_ string) bool {
	return true
}
// RegexpWhitelist is a Whitelist made up of a list of *regexp.Regexp
// patterns. Patterns are tried in registration order; the URL is allowed as
// soon as one matches, and rejected when none do.
type RegexpWhitelist struct {
	mu       sync.RWMutex
	patterns []*regexp.Regexp
}

// NewRegexpWhitelist creates a new RegexpWhitelist instance. The zero value
// is equally usable; the constructor exists for consistency.
func NewRegexpWhitelist() *RegexpWhitelist {
	return &RegexpWhitelist{}
}

// Add registers another pattern to match candidate URLs against. It
// returns the receiver so that calls can be chained.
func (w *RegexpWhitelist) Add(pat *regexp.Regexp) *RegexpWhitelist {
	w.mu.Lock()
	w.patterns = append(w.patterns, pat)
	w.mu.Unlock()
	return w
}

// IsAllowed reports whether any registered pattern matches u.
func (w *RegexpWhitelist) IsAllowed(u string) bool {
	// Snapshot the slice header under the read lock; already-appended
	// elements are never mutated, so iterating outside the lock is safe.
	w.mu.RLock()
	snapshot := w.patterns
	w.mu.RUnlock()
	for _, re := range snapshot {
		if re.MatchString(u) {
			return true
		}
	}
	return false
}
// MapWhitelist is a Whitelist backed by an exact-match set of URLs: a URL
// is allowed only if it was previously registered via Add.
type MapWhitelist interface {
	Whitelist
	Add(string) MapWhitelist
}

// mapWhitelist is the concurrency-safe implementation behind NewMapWhitelist.
type mapWhitelist struct {
	mu    sync.RWMutex
	store map[string]struct{}
}

// NewMapWhitelist returns an empty MapWhitelist.
func NewMapWhitelist() MapWhitelist {
	return &mapWhitelist{store: make(map[string]struct{})}
}

// Add registers u as an allowed URL and returns the receiver for chaining.
func (w *mapWhitelist) Add(u string) MapWhitelist {
	w.mu.Lock()
	defer w.mu.Unlock()
	w.store[u] = struct{}{}
	return w
}

// IsAllowed reports whether u was previously registered via Add.
func (w *mapWhitelist) IsAllowed(u string) bool {
	w.mu.RLock()
	defer w.mu.RUnlock()
	_, ok := w.store[u]
	return ok
}

62
vendor/github.com/lestrrat-go/httprc/v3/worker.go generated vendored Normal file
View File

@@ -0,0 +1,62 @@
package httprc
import (
"context"
"fmt"
"sync"
)
// worker services refresh requests dispatched by the controller.
type worker struct {
	httpcl HTTPClient // client placed in the context for Resource.Sync
	incoming chan any // controller's request channel; receives adjustIntervalRequest after each sync
	next <-chan Resource // resources due for an asynchronous refresh
	nextsync <-chan synchronousRequest // caller-initiated refreshes awaiting a reply
	errSink ErrorSink // receives errors from asynchronous syncs
	traceSink TraceSink // receives debug trace messages
}
// Run is the main loop of a single worker goroutine. It services both
// asynchronous refreshes (w.next) and synchronous, caller-initiated
// refreshes (w.nextsync) until ctx is canceled.
//
// readywg is decremented once the worker has finished initializing, so the
// controller can wait for all workers to come online; donewg is decremented
// when the loop exits.
func (w worker) Run(ctx context.Context, readywg *sync.WaitGroup, donewg *sync.WaitGroup) {
	w.traceSink.Put(ctx, "httprc worker: START worker loop")
	defer w.traceSink.Put(ctx, "httprc worker: END worker loop")
	defer donewg.Done()
	// Make the trace sink and HTTP client available to Resource.Sync
	// through the context.
	ctx = withTraceSink(ctx, w.traceSink)
	ctx = withHTTPClient(ctx, w.httpcl)
	readywg.Done()
	for {
		select {
		case <-ctx.Done():
			w.traceSink.Put(ctx, "httprc worker: stopping worker loop")
			return
		case r := <-w.next:
			// Asynchronous refresh: errors go to the error sink and the
			// loop keeps running.
			w.traceSink.Put(ctx, fmt.Sprintf("httprc worker: syncing %q (async)", r.URL()))
			if err := r.Sync(ctx); err != nil {
				w.errSink.Put(ctx, err)
			}
			r.SetBusy(false)
			w.sendAdjustIntervalRequest(ctx, r)
		case sr := <-w.nextsync:
			// Synchronous refresh: a caller is blocked waiting on sr.reply.
			w.traceSink.Put(ctx, fmt.Sprintf("httprc worker: syncing %q (synchronous)", sr.resource.URL()))
			if err := sr.resource.Sync(ctx); err != nil {
				w.traceSink.Put(ctx, fmt.Sprintf("httprc worker: FAILED to sync %q (synchronous): %s", sr.resource.URL(), err))
				sendReply(ctx, sr.reply, struct{}{}, err)
				sr.resource.SetBusy(false)
				// NOTE(review): this return terminates the entire worker
				// loop on a failed synchronous sync, unlike the async path
				// above which keeps going — confirm with upstream whether
				// this asymmetry is intentional.
				return
			}
			w.traceSink.Put(ctx, fmt.Sprintf("httprc worker: SUCCESS syncing %q (synchronous)", sr.resource.URL()))
			sr.resource.SetBusy(false)
			sendReply(ctx, sr.reply, struct{}{}, nil)
			w.sendAdjustIntervalRequest(ctx, sr.resource)
		}
	}
}
// sendAdjustIntervalRequest notifies the controller (via the incoming
// channel) that the resource's refresh interval may need recalculating
// after a sync. The send is abandoned if ctx is canceled first.
func (w worker) sendAdjustIntervalRequest(ctx context.Context, r Resource) {
	w.traceSink.Put(ctx, "httprc worker: Sending interval adjustment request for "+r.URL())
	select {
	case w.incoming <- adjustIntervalRequest{resource: r}:
	case <-ctx.Done():
	}
	w.traceSink.Put(ctx, "httprc worker: Sent interval adjustment request for "+r.URL())
}

4
vendor/github.com/lestrrat-go/jwx/v3/.bazelignore generated vendored Normal file
View File

@@ -0,0 +1,4 @@
cmd
bench
examples
tools

1
vendor/github.com/lestrrat-go/jwx/v3/.bazelrc generated vendored Normal file
View File

@@ -0,0 +1 @@
import %workspace%/.aspect/bazelrc/bazel7.bazelrc

1
vendor/github.com/lestrrat-go/jwx/v3/.bazelversion generated vendored Normal file
View File

@@ -0,0 +1 @@
8.3.1

39
vendor/github.com/lestrrat-go/jwx/v3/.gitignore generated vendored Normal file
View File

@@ -0,0 +1,39 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
*.test
*.prof
# IDE
.idea
.vscode
.DS_Store
*~
coverage.out
# I redirect my test output to files named "out" way too often
out
cmd/jwx/jwx
bazel-*

125
vendor/github.com/lestrrat-go/jwx/v3/.golangci.yml generated vendored Normal file
View File

@@ -0,0 +1,125 @@
version: "2"
linters:
default: all
disable:
- cyclop
- depguard
- dupl
- err113
- errorlint
- exhaustive
- funcorder
- funlen
- gochecknoglobals
- gochecknoinits
- gocognit
- gocritic
- gocyclo
- godot
- godox
- gosec
- gosmopolitan
- govet
- inamedparam
- ireturn
- lll
- maintidx
- makezero
- mnd
- nakedret
- nestif
- nlreturn
- noinlineerr
- nonamedreturns
- paralleltest
- perfsprint
- staticcheck
- recvcheck
- tagliatelle
- testifylint
- testpackage
- thelper
- varnamelen
- wrapcheck
- wsl
- wsl_v5
settings:
govet:
disable:
- shadow
- fieldalignment
enable-all: true
exclusions:
generated: lax
presets:
- comments
- common-false-positives
- legacy
- std-error-handling
rules:
- linters:
- staticcheck
path: /*.go
text: 'ST1003: should not use underscores in package names'
- linters:
- revive
path: /*.go
text: don't use an underscore in package name
- linters:
- staticcheck
text: SA1019
- linters:
- contextcheck
- exhaustruct
path: /*.go
- linters:
- errcheck
path: /main.go
- linters:
- errcheck
path: internal/codegen/codegen.go
- linters:
- errcheck
- errchkjson
- forcetypeassert
path: internal/jwxtest/jwxtest.go
- linters:
- errcheck
- errchkjson
- forcetypeassert
path: /*_test.go
- linters:
- forbidigo
path: /*_example_test.go
- linters:
- forbidigo
path: cmd/jwx/jwx.go
- linters:
- revive
path: /*_test.go
text: 'var-naming: '
- linters:
- revive
path: internal/tokens/jwe_tokens.go
text: "don't use ALL_CAPS in Go names"
- linters:
- revive
path: jwt/internal/types/
text: "var-naming: avoid meaningless package names"
paths:
- third_party$
- builtin$
- examples$
issues:
max-issues-per-linter: 0
max-same-issues: 0
formatters:
enable:
- gofmt
- goimports
exclusions:
generated: lax
paths:
- third_party$
- builtin$
- examples$

47
vendor/github.com/lestrrat-go/jwx/v3/BUILD generated vendored Normal file
View File

@@ -0,0 +1,47 @@
load("@rules_go//go:def.bzl", "go_library", "go_test")
load("@gazelle//:def.bzl", "gazelle")
# gazelle:prefix github.com/lestrrat-go/jwx/v3
# gazelle:go_naming_convention import_alias
gazelle(name = "gazelle")
go_library(
name = "jwx",
srcs = [
"format.go",
"formatkind_string_gen.go",
"jwx.go",
"options.go",
],
importpath = "github.com/lestrrat-go/jwx/v3",
visibility = ["//visibility:public"],
deps = [
"//internal/json",
"//internal/tokens",
"@com_github_lestrrat_go_option_v2//:option",
],
)
go_test(
name = "jwx_test",
srcs = ["jwx_test.go"],
deps = [
":jwx",
"//internal/jose",
"//internal/json",
"//internal/jwxtest",
"//jwa",
"//jwe",
"//jwk",
"//jwk/ecdsa",
"//jws",
"@com_github_stretchr_testify//require",
],
)
alias(
name = "go_default_library",
actual = ":jwx",
visibility = ["//visibility:public"],
)

222
vendor/github.com/lestrrat-go/jwx/v3/Changes generated vendored Normal file
View File

@@ -0,0 +1,222 @@
Changes
=======
v3 has many incompatibilities with v2. To see the full list of differences between
v2 and v3, please read the Changes-v3.md file (https://github.com/lestrrat-go/jwx/blob/develop/v3/Changes-v3.md)
v3.0.11 14 Sep 2025
* [jwk] Add `(jwk.Cache).Shutdown()` method that delegates to the httprc controller
object, to shutdown the cache.
* [jwk] Change timing of `res.Body.Close()` call
* [jwe] Previously, ecdh.PrivateKey/ecdh.PublicKey were not properly handled
when used for encryption, which has been fixed.
* [jws/jwsbb] (EXPERIMENTAL/BREAKS COMPATIBILITY) Convert most functions into
thin wrappers around functions from github.com/lestrrat-go/dsig package.
As a related change, HMACHashFuncFor/RSAHashFuncFor/ECDSAHashFuncFor/RSAPSSOptions
have been removed or unexported.
Users of this module should be using jwsbb.Sign() and jwsbb.Verify() instead of
algorithm specific jwsbb.SignRSA()/jwsbb.VerifyRSA() and such. If you feel the
need to use these functions, you should use github.com/lestrrat-go/dsig directly.
v3.0.10 04 Aug 2025
* [jws/jwsbb] Add `jwsbb.ErrHeaderNotFound()` to return the same error type as when
a non-existent header is requested. via `HeaderGetXXX()` functions. Previously, this
function was called `jwsbb.ErrFieldNotFound()`, but it was a misnomer.
* [jws/jwsbb] Fix a bug where error return values from `HeaderGetXXX()` functions
could not be matched against `jwsbb.ErrHeaderNotFound()` using `errors.Is()`.
v3.0.9 31 Jul 2025
* [jws/jwsbb] `HeaderGetXXX()` functions now return errors when
the requested header is not found, or if the value cannot be
converted to the requested type.
* [jwt] `(jwt.Token).Get` methods now return specific types of errors depending
on if a) the specified claim was not present, or b) the specified claim could
not be assigned to the destination variable.
You can distinguish these by using `errors.Is` against `jwt.ClaimNotFoundError()`
or `jwt.ClaimAssignmentFailedError()`
v3.0.8 27 Jun 2025
* [jwe/jwebb] (EXPERIMENTAL) Add low-level functions for JWE operations.
* [jws/jwsbb] (EXPERIMENTAL/BREAKS COMPATIBILITY) Add io.Reader parameter
so your choice of source of randomness can be passed. Defaults to crypto/rand.Reader.
Function signatures around jwsbb.Sign() now accept an additional `rr io.Reader`,
which can be nil for 99% of use cases.
* [jws/jwsbb] Add HeaderParse([]byte), where it is expected that the header
is already in its base64 decoded format.
* misc: replace `interface{}` with `any`
v3.0.7 16 Jun 2025
* [jws/jwsbb] (EXPERIMENTAL) Add low-level fast access to JWS headers in compact
serialization form.
* [jws] Fix error reporting when no key matched for a signature.
* [jws] Refactor jws signer setup.
* Known algorithms are now implemented completely in the jws/jwsbb package.
* VerifierFor and SignerFor now always succeed, and will also return a Signer2
or Verifier2 that wraps the legacy Signer or Verifier if one is registered.
v3.0.6 13 Jun 2025
* This release contains various performance improvements all over the code.
No, this time for real. In particular, the most common case for signing
a JWT with a key is approx 70% more efficient based on the number of allocations.
Please read the entry for the (retracted) v3.0.4 for what else I have to
say about performance improvements
* [jwt] Added fast-path for token signing and verification. The fast path
is triggered if you only pass `jwt.Sign()` and `jwt.Parse()` one options each
(`jwt.WithKey()`), with no suboptions.
* [jws] Major refactoring around basic operations:
* How to work with Signer/Verifier have completely changed. Please take
a look at examples/jws_custom_signer_verifier_example_test.go for how
to do it the new way. The old way still works, but it WILL be removed
when v4 arrives.
* Related to the above, old code has been moved to `jws/legacy`.
* A new package `jws/jwsbb` has been added. `bb` stands for building blocks.
This package separates out the low-level JWS operations into its own
package. So if you are looking for just the signing of a payload with
a key, this is it.
`jws/jwsbb` is currently considered to be EXPERIMENTAL.
v3.0.5 11 Jun 2025
* Retract v3.0.4
* Code for v3.0.3 is the same as v3.0.3
v3.0.4 09 Jun 2025
* This release contains various performance improvements all over the code.
Because of the direction that this library is taking, we have always been
more focused on correctness and usability/flexibility over performance.
It just so happens that I had a moment of inspiration and decided to see
just how good our AI-based coding agents are in this sort of analysis-heavy tasks.
Long story short, the AI was fairly good at identifying suspicious code with
an okay accuracy, but completely failed to make any meaningful changes to the
code in a way that both did not break the code _and_ improved performance.
I am sure that they will get better in the near future, but for now,
I had to do the changes myself. I should clarify to their defence that
the AI was very helpful in writing cumbersome benchmark code for me.
The end result is that we have anywhere from 10 to 30% performance improvements
in various parts of the code that we touched, based on number of allocations.
We believe that this would be a significant improvement for many users.
For further improvements, we can see that there would be a clear benefit to
writing optimized code path that is designed to serve the most common cases.
For example, for the case of signing JWTs with a single key, we could provide
a path that skips a lot of extra processing (we kind of did that in this change,
but we _could_ go ever harder in this direction). However, it is a trade-off between
maintainability and performance, and as I am currently the sole maintainer of
this library for the time being, I only plan to pursue such a route where it
requires minimal effort on my part.
If you are interested in helping out in this area, I hereby thank you in advance.
However, please be perfectly clear that unlike other types of changes, for performance
related changes, the balance between the performance gains and maintainability is
top priority. If you have good ideas and code, they will always be welcome, but
please be prepared to justify your changes.
Finally, thank you for using this library!
v3.0.3 06 Jun 2025
* Update some dependencies
* [jwe] Change some error messages to contain more context information
v3.0.2 03 Jun 2025
* [transform] (EXPERIMENTAL) Add utility function `transform.AsMap` to convert a
Mappable object to a map[string]interface{}. This is useful for converting
objects such as `jws.Header`, `jwk.Key`, `jwt.Token`, etc. to a map that can
be used with other libraries that expect a map.
* [jwt] (EXPERIMENTAL) Added token filtering functionality through the TokenFilter interface.
* [jwt/openid] (EXPERIMENTAL) Added StandardClaimsFilter() for filtering standard OpenID claims.
* [jws] (EXPERIMENTAL) Added header filtering functionality through the HeaderFilter interface.
* [jwe] (EXPERIMENTAL) Added header filtering functionality through the HeaderFilter interface.
* [jwk] (EXPERIMENTAL) Added key filtering functionality through the KeyFilter interface.
* [jwk] `jwk.Export` previously did not recognize third-party objects that implemented `jwk.Key`,
as it was detecting what to do by checking if the object was one of our own unexported
types. This caused some problems for consumers of this library that wanted to extend the
features of the keys.
Now `jwk.Export` checks types against interface types such as `jwk.RSAPrivateKey`, `jwk.ECDSAPrivateKey`, etc.
It also uses some reflect blackmagic to detect if the given object implements the `jwk.Key` interface
via embedding, so you should be able to embed a `jwk.Key` to another object to act as if it
is a legitimate `jwk.Key`, as far as `jwk.Export` is concerned.
v3.0.1 29 Apr 2025
* [jwe] Fixed a long standing bug that could lead to degraded encryption or failure to
decrypt JWE messages when a very specific combination of inputs were used for
JWE operations.
This problem only manifested itself when the following conditions in content encryption or decryption
were met:
- Content encryption was specified to use DIRECT mode.
- Content encryption algorithm is specified as A256CBC_HS512
- The key was erroneously constructed with a 32-byte content encryption key (CEK)
In this case, the user would be passing a mis-constructed key of 32-bytes instead
of the intended 64-bytes. In all other cases, this construction would cause
an error because `crypto/aes.NewCipher` would return an error when a key with length
not matching 16, 24, or 32 bytes is used. However, due to using the provided
32-bytes as half CEK and half the hash, the `crypto/aes.NewCipher` was passed
a 16-byte key, which is fine for AES-128. So internally `crypto/aes.NewCipher` would
choose to use AES-128 instead of AES-256, and happily continue. Note that no other
key lengths such as 48 and 128 would have worked. It had to be exactly 32.
This does indeed result in a downgraded encryption, but we believe it is unlikely that this would cause a problem in the real world,
as you would have to very specifically choose to use DIRECT mode, choose
the specific content encryption algorithm, AND also use the wrong key size of
exactly 32 bytes.
However, in an abundance of caution, we recommend that you upgrade to v3.0.1 or later,
or v2.1.6 or later if you are still on v2 series.
* [jws] Improve performance of jws.SplitCompact and jws.SplitCompactString
* [jwe] Improve performance of jwe.Parse
v3.0.0 1 Apr 2025
* Release initial v3.0.0 series. Code is identical to v3.0.0-beta2, except
for minor documentation changes.
Please note that v1 will no longer be maintained.
Going forward v2 will receive security updates but will no longer receive
feature updates. Users are encouraged to migrate to v3. There is no hard-set
guarantee as to how long v2 will be supported, but if/when v4 comes out,
v2 support will be terminated then.
v3.0.0-beta2 30 Mar 2025
* [jwk] Fix a bug where `jwk.Set`'s `Keys()` method did not return the proper
non-standard fields. (#1322)
* [jws][jwt] Implement `WithBase64Encoder()` options to pass base64 encoders
to use during signing/verifying signatures. This is useful when the token
provider generates JWTs that don't follow the specification and uses base64
encoding other than raw url encoding (no padding), such as, apparently,
AWS ALB. (#1324, #1328)
v3.0.0-beta1 15 Mar 2025
* [jwt] Token validation no longer truncates time based fields by default.
To restore old behavior, you can either change the global settings by
calling `jwt.Settings(jwt.WithTruncation(time.Second))`, or you can
change it by each invocation by using `jwt.Validate(..., jwt.WithTruncation(time.Second))`
v3.0.0-alpha3 13 Mar 2025
* [jwk] Importing/Exporting from jwk.Key with P256/P386/P521 curves to
ecdh.PrivateKey/ecdh.PublicKey should now work. Previously these keys were not properly
recognized by the exporter/importer. Note that keys that use X25519 and P256/P384/P521
behave differently: X25519 keys can only be exported to/imported from OKP keys,
while P256/P384/P521 can be exported to either ecdsa or ecdh keys.
v3.0.0-alpha2 25 Feb 2025
* Update to work with go1.24
* Update tests to work with latest latchset/jose
* Fix build pipeline to work with latest golangci-lint
* Require go1.23
v3.0.0-alpha1 01 Nov 2024
* Initial release of v3 line.

390
vendor/github.com/lestrrat-go/jwx/v3/Changes-v2.md generated vendored Normal file
View File

@@ -0,0 +1,390 @@
# Incompatible Changes from v1 to v2
These are changes that are incompatible with the v1.x.x version.
* [tl;dr](#tldr) - If you don't feel like reading the details -- but you will read the details, right?
* [Detailed List of Changes](#detailed-list-of-changes) - A comprehensive list of changes from v1 to v2
# tl;dr
## JWT
```go
// most basic
jwt.Parse(serialized, jwt.WithKey(alg, key)) // NOTE: verification and validation are ENABLED by default!
jwt.Sign(token, jwt.WithKey(alg,key))
// with a jwk.Set
jwt.Parse(serialized, jwt.WithKeySet(set))
// UseDefault/InferAlgorithm with JWKS
jwt.Parse(serialized, jwt.WithKeySet(set,
jws.WithUseDefault(true), jws.WithInferAlgorithm(true))
// Use `jku`
jwt.Parse(serialized, jwt.WithVerifyAuto(...))
// Any other custom key provisioning (using functions in this
// example, but can be anything that fulfills jws.KeyProvider)
jwt.Parse(serialized, jwt.WithKeyProvider(jws.KeyProviderFunc(...)))
```
## JWK
```go
// jwk.New() was confusing. Renamed to fit the actual implementation
key, err := jwk.FromRaw(rawKey)
// Algorithm() now returns jwa.KeyAlgorithm type. `jws.Sign()`
// and other function that receive JWK algorithm names accept
// this new type, so you can use the same key and do the following
// (previously you needed to type assert)
jws.Sign(payload, jws.WithKey(key.Algorithm(), key))
// If you need the specific type, type assert
key.Algorithm().(jwa.SignatureAlgorithm)
// jwk.AutoRefresh is no more. Use jwk.Cache
cache := jwk.NewCache(ctx, options...)
// Certificate chains are no longer jwk.CertificateChain type, but
// *(github.com/lestrrat-go/jwx/cert).Chain
cc := key.X509CertChain() // this is *cert.Chain now
```
## JWS
```go
// basic
jws.Sign(payload, jws.WithKey(alg, key))
jws.Sign(payload, jws.WithKey(alg, key), jws.WithKey(alg, key), jws.WithJSON(true))
jws.Verify(signed, jws.WithKey(alg, key))
// other ways to pass the key
jws.Sign(payload, jws.WithKeySet(jwks))
jws.Sign(payload, jws.WithKeyProvider(kp))
// retrieve the key that succeeded in verifying
var keyUsed interface{}
jws.Verify(signed, jws.WithKeySet(jwks), jws.WithKeyUsed(&keyUsed))
```
## JWE
```go
// basic
jwe.Encrypt(payload, jwe.WithKey(alg, key)) // other defaults are inferred
jwe.Encrypt(payload, jwe.WithKey(alg, key), jwe.WithKey(alg, key), jwe.WithJSON(true))
jwe.Decrypt(encrypted, jwe.WithKey(alg, key))
// other ways to pass the key
jwe.Encrypt(payload, jwe.WithKeySet(jwks))
jwe.Encrypt(payload, jwe.WithKeyProvider(kp))
// retrieve the key that succeeded in decrypting
var keyUsed interface{}
jwe.Decrypt(encrypted, jwe.WithKeySet(jwks), jwe.WithKeyUsed(&keyUsed))
```
# Detailed List of Changes
## Module
* Module now requires go 1.16
* Use of github.com/pkg/errors is no more. If you were relying on behavior
that depends on the errors being an instance of github.com/pkg/errors
then you need to change your code
* File-generation tools have been moved out of internal/ directories.
These files pre-dates Go modules, and they were in internal/ in order
to avoid being listed in the `go doc` -- however, now that we can
make them separate modules this is no longer necessary.
* New package `cert` has been added to handle `x5c` certificate
chains, and to work with certificates
* cert.Chain to store base64 encoded ASN.1 DER format certificates
* cert.EncodeBase64 to encode ASN.1 DER format certificate using base64
* cert.Create to create a base64 encoded ASN.1 DER format certificates
* cert.Parse to parse base64 encoded ASN.1 DER format certificates
## JWE
* `jwe.Compact()`'s signature has changed to
`jwe.Compact(*jwe.Message, ...jwe.CompactOption)`
* `jwe.JSON()` has been removed. You can generate JSON serialization
using `jwe.Encrypt(jwe.WithJSON())` or `json.Marshal(jwe.Message)`
* `(jwe.Message).Decrypt()` has been removed. Since formatting of the
original serialized message matters (including whitespace), using a parsed
object was inherently confusing.
* `jwe.Encrypt()` can now generate JWE messages in either compact or JSON
forms. By default, the compact form is used. JSON format can be
enabled by using the `jwe.WithJSON` option.
* `jwe.Encrypt()` can now accept multiple keys by passing multiple
`jwe.WithKey()` options. This can be used with `jwe.WithJSON` to
create JWE messages with multiple recipients.
* `jwe.DecryptEncryptOption()` has been renamed to `jwe.EncryptDecryptOption()`.
This is so that it is more uniform with `jws` equivalent of `jws.SignVerifyOption()`
where the producer (`Sign`) comes before the consumer (`Verify`) in the naming
* `jwe.WithCompact` and `jwe.WithJSON` options have been added
to control the serialization format.
* jwe.Decrypt()'s method signature has been changed to `jwe.Decrypt([]byte, ...jwe.DecryptOption) ([]byte, error)`.
These options can be stacked. Therefore, you could configure the
verification process to attempt a static key pair, a JWKS, and only
try other forms if the first two fails, for example.
- For static key pair, use `jwe.WithKey()`
- For static JWKS, use `jwe.WithKeySet()` (NOTE: InferAlgorithmFromKey like in `jws` package is NOT supported)
- For custom, possibly dynamic key provisioning, use `jwe.WithKeyProvider()`
* jwe.Decrypter has been unexported. Users did not need this.
* jwe.WithKeyProvider() has been added to specify arbitrary
code to specify which keys to try.
* jwe.KeyProvider interface has been added
* jwe.KeyProviderFunc has been added
* `WithPostParser()` has been removed. You can achieve the same effect
by using `jwe.WithKeyProvider()`. Because this was the only consumer for
`jwe.DecryptCtx`, this type has been removed as well.
* `x5c` field type has been changed to `*cert.Chain` instead of `[]string`
* Method signature for `jwe.Parse()` has been changed to include options,
but options are currently not used
* `jwe.ReadFile` now supports the option `jwe.WithFS` which allows you to
read data from arbitrary `fs.FS` objects
* jwe.WithKeyUsed has been added to allow users to retrieve
the key used for decryption. This is useful in cases you provided
multiple keys and you want to know which one was successful
## JWK
* `jwk.New()` has been renamed to `jwk.FromRaw()`, which hopefully will
make it clearer to users what the input should be.
* `jwk.Set` has many interface changes:
* Changed methods to match jwk.Key and its semantics:
* Field is now Get() (returns values for arbitrary fields other than keys). Fetching a key is done via Key()
* Remove() now removes arbitrary fields, not keys. to remove keys, use RemoveKey()
* Iterate has been added to iterate through all non-key fields.
* Add is now AddKey(Key) string, and returns an error when the same key is added
* Get is now Key(int) (Key, bool)
* Remove is now RemoveKey(Key) error
* Iterate is now Keys(context.Context) KeyIterator
* Clear is now Clear() error
* `jwk.CachedSet` has been added. You can create a `jwk.Set` that is backed by
`jwk.Cache` so you can do this:
```go
cache := jwk.NewCache(ctx)
cachedSet := jwk.NewCachedSet(cache, jwksURI)
// cachedSet is always the refreshed, cached version from jwk.Cache
jws.Verify(signed, jws.WithKeySet(cachedSet))
```
* `jwk.NewRSAPrivateKey()`, `jwk.NewECDSAPrivateKey()`, etc have been removed.
There is no longer any way to create concrete types of `jwk.Key`
* `jwk.Key` type no longer supports direct unmarshaling via `json.Unmarshal()`,
because you can no longer instantiate concrete `jwk.Key` types. You will need to
use `jwk.ParseKey()`. See the documentation for ways to parse JWKs.
* `(jwk.Key).Algorithm()` is now of `jwk.KeyAlgorithm` type. This field used
to be `string` and therefore could not be passed directly to `jwt.Sign()`
`jws.Sign()`, `jwe.Encrypt()`, et al. This is no longer the case, and
now you can pass it directly. See
https://github.com/lestrrat-go/jwx/blob/v2/docs/99-faq.md#why-is-jwkkeyalgorithm-and-jwakeyalgorithm-so-confusing
for more details
* `jwk.Fetcher` and `jwk.FetchFunc` has been added.
They represent something that can fetch a `jwk.Set`
* `jwk.CertificateChain` has been removed, use `*cert.Chain`
* `x5c` field type has been changed to `*cert.Chain` instead of `[]*x509.Certificate`
* `jwk.ReadFile` now supports the option `jwk.WithFS` which allows you to
read data from arbitrary `fs.FS` objects
* Added `jwk.PostFetcher`, `jwk.PostFetchFunc`, and `jwk.WithPostFetch` to
allow users to get at the `jwk.Set` that was fetched in `jwk.Cache`.
This will make it possible for users to supply extra information and edit
`jwk.Set` after it has been fetched and parsed, but before it is cached.
You could, for example, modify the `alg` field so that it's easier to
work with when you use it in `jws.Verify` later.
* Reworked `jwk.AutoRefresh` in terms of `github.com/lestrrat-go/httprc`
and renamed it `jwk.Cache`.
Major difference between `jwk.AutoRefresh` and `jwk.Cache` is that while
former used one `time.Timer` per resource, the latter uses a static timer
(based on `jwk.WithRefreshWindow()` value, default 15 minutes) that periodically
refreshes all resources that were due to be refreshed within that time frame.
This method may cause your updates to happen slightly later, but uses significantly
less resources and is less prone to clogging.
* Reimplemented `jwk.Fetch` in terms of `github.com/lestrrat-go/httprc`.
* Previously `jwk.Fetch` and `jwk.AutoRefresh` respected backoff options,
but this has been removed. This is to avoid unwanted clogging of the fetch workers
which is the default processing mode in `github.com/lestrrat-go/httprc`.
If you are using backoffs, you need to control your inputs more carefully so as
not to clog your fetch queue, and therefore you should be writing custom code that
suits your needs
## JWS
* `jws.Sign()` can now generate JWS messages in either compact or JSON
forms. By default, the compact form is used. JSON format can be
enabled by using the `jws.WithJSON` option.
* `jws.Sign()` can now accept multiple keys by passing multiple
`jws.WithKey()` options. This can be used with `jws.WithJSON` to
create JWS messages with multiple signatures.
* `jws.WithCompact` and `jws.WithJSON` options have been added
to control the serialization format.
* jws.Verify()'s method signature has been changed to `jws.Verify([]byte, ...jws.VerifyOption) ([]byte, error)`.
These options can be stacked. Therefore, you could configure the
verification process to attempt a static key pair, a JWKS, and only
try other forms if the first two fails, for example.
- For static key pair, use `jws.WithKey()`
- For static JWKS, use `jws.WithKeySet()`
- For enabling verification using `jku`, use `jws.WithVerifyAuto()`
- For custom, possibly dynamic key provisioning, use `jws.WithKeyProvider()`
* jws.WithVerify() has been removed.
* jws.WithKey() has been added to specify an algorithm + key to
verify the payload with.
* jws.WithKeySet() has been added to specify a JWKS to be used for
verification. By default `kid` AND `alg` must match between the signature
and the key.
The option can take further suboptions:
```go
jws.Parse(serialized,
jws.WithKeySet(set,
// by default `kid` is required. set false to disable.
jws.WithRequireKid(false),
// optionally skip matching kid if there's exactly one key in set
jws.WithUseDefault(true),
// infer algorithm name from key type
jws.WithInferAlgorithm(true),
),
)
```
* `jws.VerifyAuto` has been removed in favor of using
`jws.WithVerifyAuto` option with `jws.Verify()`
* `jws.WithVerifyAuto` has been added to enable verification
using `jku`.
The first argument must be a jwk.Fetcher object, but can be
set to `nil` to use the default implementation which is `jwk.Fetch`
The rest of the arguments are treated as options passed to the
`(jwk.Fetcher).Fetch()` function.
* Remove `jws.WithPayloadSigner()`. This should be completely replaceable
using `jws.WithKey()`
* jws.WithKeyProvider() has been added to specify arbitrary
code to specify which keys to try.
* jws.KeyProvider interface has been added
* jws.KeyProviderFunc has been added
* jws.WithKeyUsed has been added to allow users to retrieve
the key used for verification. This is useful in cases you provided
multiple keys and you want to know which one was successful
* `x5c` field type has been changed to `*cert.Chain` instead of `[]string`
* `jws.ReadFile` now supports the option `jws.WithFS` which allows you to
read data from arbitrary `fs.FS` objects
## JWT
* `jwt.Parse` now verifies the signature and validates the token
by default. You must disable it explicitly using `jwt.WithValidate(false)`
and/or `jwt.WithVerify(false)` if you only want to parse the JWT message.
If you don't want either, a convenience function `jwt.ParseInsecure`
has been added.
* `jwt.Parse` can only parse raw JWT (JSON) or JWS (JSON or Compact).
It no longer accepts JWE messages.
* `jwt.WithDecrypt` has been removed
* `jwt.WithJweHeaders` has been removed
* `jwt.WithVerify()` has been renamed to `jwt.WithKey()`. The option can
be used for signing, encryption, and parsing.
* `jwt.Validator` has been changed to return `jwt.ValidationError`.
If you provide a custom validator, you should wrap the error with
`jwt.NewValidationError()`
* `jwt.UseDefault()` has been removed. You should use `jws.WithUseDefault()`
as a suboption in the `jwt.WithKeySet()` option.
```go
jwt.Parse(serialized, jwt.WithKeySet(set, jws.WithUseDefault(true)))
```
* `jwt.InferAlgorithmFromKey()` has been removed. You should use
`jws.WithInferAlgorithmFromKey()` as a suboption in the `jwt.WithKeySet()` option.
```go
jwt.Parse(serialized, jwt.WithKeySet(set, jws.WithInferAlgorithmFromKey(true)))
```
* jwt.WithKeySetProvider has been removed. Use `jwt.WithKeyProvider()`
instead. If jwt.WithKeyProvider seems a bit complicated, use a combination of
JWS parse, no-verify/validate JWT parse, and an extra JWS verify:
```go
msg, _ := jws.Parse(signed)
token, _ := jwt.Parse(msg.Payload(), jwt.WithVerify(false), jwt.WithValidate(false))
// Get information out of token, for example, `iss`
switch token.Issuer() {
case ...:
jws.Verify(signed, jws.WithKey(...))
}
```
* `jwt.WithHeaders` and `jwt.WithJwsHeaders` have been removed.
You should be able to use the new `jwt.WithKey` option to pass headers
* `jwt.WithSignOption` and `jwt.WithEncryptOption` have been added as
escape hatches for options that are declared in `jws` and `jwe` packages
but not in `jwt`
* `jwt.ReadFile` now supports the option `jwt.WithFS` which allows you to
read data from arbitrary `fs.FS` objects
* `jwt.Sign()` has been changed so that it works more like the new `jws.Sign()`

140
vendor/github.com/lestrrat-go/jwx/v3/Changes-v3.md generated vendored Normal file
View File

@@ -0,0 +1,140 @@
# Incompatible Changes from v2 to v3
These are changes that are incompatible with the v2.x.x version.
# Detailed list of changes
## Module
* This module now requires Go 1.23
* All `xxx.Get()` methods have been changed from `Get(string) (interface{}, error)` to
`Get(string, interface{}) error`, where the second argument should be a pointer
to the storage destination of the field.
* All convenience accessors (e.g. `(jwt.Token).Subject`) now return `(T, bool)` instead of
`T`. If you want an accessor that returns a single value, consider using `Get()`
* Most major errors can now be differentiated using `errors.Is`
## JWA
* All string constants have been renamed to equivalent functions that return a struct.
You should rewrite `jwa.RS256` as `jwa.RS256()` and so forth.
* By default, only known algorithm names are accepted. For example, in our JWK tests,
there are tests that deal with "ECMR" algorithm, but this will now fail by default.
If you want this algorithm to succeed parsing, you need to call `jwa.RegisterXXXX`
functions before using them.
* Previously, unmarshaling unquoted strings used to work (e.g. `var s = "RS256"`),
but now they must conform to the JSON standard and be quoted (e.g. `var s = strconv.Quote("RS256")`)
## JWT
* All convenience accessors (e.g. `Subject`) now return `(T, bool)` instead of
just `T`. If you want a single return value accessor, use `Get(dst) error` instead.
* Validation used to work for `iat`, `nbf`, `exp` fields where these fields were
set to the explicit time.Time{} zero value, but now the _presence_ of these fields matter.
* Validation of fields related to time used to be truncated to one second accuracy,
but no longer does so. To restore old behavior, you can either change the global settings by
calling `jwt.Settings(jwt.WithTruncation(time.Second))`, or you can
change it by each invocation by using `jwt.Validate(..., jwt.WithTruncation(time.Second))`
* Error names have been renamed. For example `jwt.ErrInvalidJWT` has been renamed to
`jwt.UnknownPayloadTypeError` to better reflect what the error means. For other errors,
`func ErrXXXX()` have generally been renamed to `func XXXError()`
* Validation errors are now wrapped. While `Validate()` returns a `ValidateError()` type,
it can also be matched against more specific error types such as `TokenExpiredError()`
using `errors.Is`
* `jwt.ErrMissingRequiredClaim` has been removed
## JWS
* Iterators have been completely removed.
* As a side effect of removing iterators, some methods such as `Copy()` lost the
`context.Context` argument
* All convenience accessors (e.g. `Algorithm`) now return `(T, bool)` instead of
just `T`. If you want a single return value accessor, use `Get(dst) error` instead.
* Errors from `jws.Sign` and `jws.Verify`, as well as `jws.Parse` (and friends)
can now be differentiated by using `errors.Is`. All `jws.IsXXXXError` functions
have been removed.
## JWE
* Iterators have been completely removed.
* As a side effect of removing iterators, some methods such as `Copy()` lost the
`context.Context` argument
* All convenience accessors (e.g. `Algorithm`) now return `(T, bool)` instead of
just `T`. If you want a single return value accessor, use `Get(dst) error` instead.
* Errors from `jwe.Decrypt` and `jwe.Encrypt`, as well as `jwe.Parse` (and friends)
can now be differentiated by using `errors.Is`. All `jwe.IsXXXXError` functions
have been removed.
## JWK
* All convenience accessors (e.g. `Algorithm`, `Crv`) now return `(T, bool)` instead
of just `T`, except `KeyType`, which _always_ returns a valid value. If you want a
single return value accessor, use `Get(dst) error` instead.
* `jwk.KeyUsageType` can now be configured so that it's possible to assign values
other than "sig" and "enc" via `jwk.RegisterKeyUsage()`. Furthermore, strict
checks can be turned on/off against these registered values
* `jwk.Cache` has been completely re-worked based on github.com/lestrrat-go/httprc/v3.
In particular, the default whitelist mode has changed from "block everything" to
"allow everything".
* Experimental secp256k1 encoding/decoding for PEM encoded ASN.1 DER Format
has been removed. Instead, `jwk.PEMDecoder` and `jwk.PEMEncoder` have been
added to support those who want to perform non-standard PEM encoding/decoding
* Iterators have been completely removed.
* `jwk/x25519` has been removed. To use X25519 keys, use `(crypto/ecdh).PrivateKey` and
`(crypto/ecdh).PublicKey`. Similarly, internals have been reworked to use `crypto/ecdh`
* Parsing has completely been reworked. It is now possible to add your own `jwk.KeyParser`
to generate a custom `jwk.Key` that this library may not natively support. Also see
`jwk.RegisterKeyParser()`
* `jwk.KeyProbe` has been added to aid probing the JSON message. This is used to
guess the type of key described in the JSON message before deciding which concrete
type to instantiate, and aids implementing your own `jwk.KeyParser`. Also see
`jwk.RegisterKeyProbe()`
* Conversion between raw keys and `jwk.Key` can be customized using `jwk.KeyImporter` and `jwk.KeyExporter`.
Also see `jwk.RegisterKeyImporter()` and `jwk.RegisterKeyExporter()`
* Added `jwk/ecdsa` to keep track of which curves are available for ECDSA keys.
* `(jwk.Key).Raw()` has been deprecated. Use `jwk.Export()` instead to convert `jwk.Key`
objects into their "raw" versions (e.g. `*rsa.PrivateKey`, `*ecdsa.PrivateKey`, etc).
This is to allow third parties to register custom key types that this library does not
natively support: Whereas a method must be bound to an object, and thus does not necessarily
have a way to hook into a global settings (i.e. custom exporter/importer) for arbitrary
key types, if the entrypoint is a function it's much easier and cleaner to for third-parties
to take advantage and hook into the mechanisms.
* `jwk.FromRaw()` has been deprecated. Use `jwk.Import()` instead to convert "raw"
keys (e.g. `*rsa.PrivateKey`, `*ecdsa.PrivateKey`, etc) into `jwk.Key`s.
* `(jwk.Key).FromRaw()` has been deprecated. The method `(jwk.Key).Import()` still exist for
built-in types, but it is no longer part of any public API (`interface{}`).
* `jwk.Fetch` is marked as a simple wrapper around `net/http` and `jwk.Parse`.
* `jwk.SetGlobalFetcher` has been deprecated.
* `jwk.Fetcher` has been clearly marked as something that has limited
usage for `jws.WithVerifyAuto`
* `jwk.Key` with P256/P384/P521 curves can be exported to `ecdh.PrivateKey`/`ecdh.PublicKey`

View File

@@ -19,3 +19,4 @@ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

34
vendor/github.com/lestrrat-go/jwx/v3/MODULE.bazel generated vendored Normal file
View File

@@ -0,0 +1,34 @@
module(
name = "com_github_lestrrat_go_jwx_v3",
version = "3.0.0",
repo_name = "com_github_lestrrat_go_jwx_v2",
)
bazel_dep(name = "bazel_skylib", version = "1.7.1")
bazel_dep(name = "rules_go", version = "0.55.1")
bazel_dep(name = "gazelle", version = "0.44.0")
bazel_dep(name = "aspect_bazel_lib", version = "2.11.0")
# Go SDK setup - using Go 1.24.4 to match the toolchain in go.mod
go_sdk = use_extension("@rules_go//go:extensions.bzl", "go_sdk")
go_sdk.download(version = "1.24.4")
# Go dependencies from go.mod
go_deps = use_extension("@gazelle//:extensions.bzl", "go_deps")
go_deps.from_file(go_mod = "//:go.mod")
# Use repositories for external Go dependencies
use_repo(
go_deps,
"com_github_decred_dcrd_dcrec_secp256k1_v4",
"com_github_goccy_go_json",
"com_github_lestrrat_go_blackmagic",
"com_github_lestrrat_go_dsig",
"com_github_lestrrat_go_dsig_secp256k1",
"com_github_lestrrat_go_httprc_v3",
"com_github_lestrrat_go_option_v2",
"com_github_segmentio_asm",
"com_github_stretchr_testify",
"com_github_valyala_fastjson",
"org_golang_x_crypto",
)

230
vendor/github.com/lestrrat-go/jwx/v3/MODULE.bazel.lock generated vendored Normal file
View File

@@ -0,0 +1,230 @@
{
"lockFileVersion": 18,
"registryFileHashes": {
"https://bcr.bazel.build/bazel_registry.json": "8a28e4aff06ee60aed2a8c281907fb8bcbf3b753c91fb5a5c57da3215d5b3497",
"https://bcr.bazel.build/modules/abseil-cpp/20210324.2/MODULE.bazel": "7cd0312e064fde87c8d1cd79ba06c876bd23630c83466e9500321be55c96ace2",
"https://bcr.bazel.build/modules/abseil-cpp/20211102.0/MODULE.bazel": "70390338f7a5106231d20620712f7cccb659cd0e9d073d1991c038eb9fc57589",
"https://bcr.bazel.build/modules/abseil-cpp/20230125.1/MODULE.bazel": "89047429cb0207707b2dface14ba7f8df85273d484c2572755be4bab7ce9c3a0",
"https://bcr.bazel.build/modules/abseil-cpp/20230802.0.bcr.1/MODULE.bazel": "1c8cec495288dccd14fdae6e3f95f772c1c91857047a098fad772034264cc8cb",
"https://bcr.bazel.build/modules/abseil-cpp/20230802.0/MODULE.bazel": "d253ae36a8bd9ee3c5955384096ccb6baf16a1b1e93e858370da0a3b94f77c16",
"https://bcr.bazel.build/modules/abseil-cpp/20230802.1/MODULE.bazel": "fa92e2eb41a04df73cdabeec37107316f7e5272650f81d6cc096418fe647b915",
"https://bcr.bazel.build/modules/abseil-cpp/20240116.1/MODULE.bazel": "37bcdb4440fbb61df6a1c296ae01b327f19e9bb521f9b8e26ec854b6f97309ed",
"https://bcr.bazel.build/modules/abseil-cpp/20240116.1/source.json": "9be551b8d4e3ef76875c0d744b5d6a504a27e3ae67bc6b28f46415fd2d2957da",
"https://bcr.bazel.build/modules/aspect_bazel_lib/2.11.0/MODULE.bazel": "cb1ba9f9999ed0bc08600c221f532c1ddd8d217686b32ba7d45b0713b5131452",
"https://bcr.bazel.build/modules/aspect_bazel_lib/2.11.0/source.json": "92494d5aa43b96665397dd13ee16023097470fa85e276b93674d62a244de47ee",
"https://bcr.bazel.build/modules/bazel_features/1.1.0/MODULE.bazel": "cfd42ff3b815a5f39554d97182657f8c4b9719568eb7fded2b9135f084bf760b",
"https://bcr.bazel.build/modules/bazel_features/1.1.1/MODULE.bazel": "27b8c79ef57efe08efccbd9dd6ef70d61b4798320b8d3c134fd571f78963dbcd",
"https://bcr.bazel.build/modules/bazel_features/1.11.0/MODULE.bazel": "f9382337dd5a474c3b7d334c2f83e50b6eaedc284253334cf823044a26de03e8",
"https://bcr.bazel.build/modules/bazel_features/1.15.0/MODULE.bazel": "d38ff6e517149dc509406aca0db3ad1efdd890a85e049585b7234d04238e2a4d",
"https://bcr.bazel.build/modules/bazel_features/1.17.0/MODULE.bazel": "039de32d21b816b47bd42c778e0454217e9c9caac4a3cf8e15c7231ee3ddee4d",
"https://bcr.bazel.build/modules/bazel_features/1.18.0/MODULE.bazel": "1be0ae2557ab3a72a57aeb31b29be347bcdc5d2b1eb1e70f39e3851a7e97041a",
"https://bcr.bazel.build/modules/bazel_features/1.19.0/MODULE.bazel": "59adcdf28230d220f0067b1f435b8537dd033bfff8db21335ef9217919c7fb58",
"https://bcr.bazel.build/modules/bazel_features/1.30.0/MODULE.bazel": "a14b62d05969a293b80257e72e597c2da7f717e1e69fa8b339703ed6731bec87",
"https://bcr.bazel.build/modules/bazel_features/1.30.0/source.json": "b07e17f067fe4f69f90b03b36ef1e08fe0d1f3cac254c1241a1818773e3423bc",
"https://bcr.bazel.build/modules/bazel_features/1.4.1/MODULE.bazel": "e45b6bb2350aff3e442ae1111c555e27eac1d915e77775f6fdc4b351b758b5d7",
"https://bcr.bazel.build/modules/bazel_features/1.9.0/MODULE.bazel": "885151d58d90d8d9c811eb75e3288c11f850e1d6b481a8c9f766adee4712358b",
"https://bcr.bazel.build/modules/bazel_features/1.9.1/MODULE.bazel": "8f679097876a9b609ad1f60249c49d68bfab783dd9be012faf9d82547b14815a",
"https://bcr.bazel.build/modules/bazel_skylib/1.0.3/MODULE.bazel": "bcb0fd896384802d1ad283b4e4eb4d718eebd8cb820b0a2c3a347fb971afd9d8",
"https://bcr.bazel.build/modules/bazel_skylib/1.1.1/MODULE.bazel": "1add3e7d93ff2e6998f9e118022c84d163917d912f5afafb3058e3d2f1545b5e",
"https://bcr.bazel.build/modules/bazel_skylib/1.2.0/MODULE.bazel": "44fe84260e454ed94ad326352a698422dbe372b21a1ac9f3eab76eb531223686",
"https://bcr.bazel.build/modules/bazel_skylib/1.2.1/MODULE.bazel": "f35baf9da0efe45fa3da1696ae906eea3d615ad41e2e3def4aeb4e8bc0ef9a7a",
"https://bcr.bazel.build/modules/bazel_skylib/1.3.0/MODULE.bazel": "20228b92868bf5cfc41bda7afc8a8ba2a543201851de39d990ec957b513579c5",
"https://bcr.bazel.build/modules/bazel_skylib/1.4.1/MODULE.bazel": "a0dcb779424be33100dcae821e9e27e4f2901d9dfd5333efe5ac6a8d7ab75e1d",
"https://bcr.bazel.build/modules/bazel_skylib/1.4.2/MODULE.bazel": "3bd40978e7a1fac911d5989e6b09d8f64921865a45822d8b09e815eaa726a651",
"https://bcr.bazel.build/modules/bazel_skylib/1.5.0/MODULE.bazel": "32880f5e2945ce6a03d1fbd588e9198c0a959bb42297b2cfaf1685b7bc32e138",
"https://bcr.bazel.build/modules/bazel_skylib/1.6.1/MODULE.bazel": "8fdee2dbaace6c252131c00e1de4b165dc65af02ea278476187765e1a617b917",
"https://bcr.bazel.build/modules/bazel_skylib/1.7.0/MODULE.bazel": "0db596f4563de7938de764cc8deeabec291f55e8ec15299718b93c4423e9796d",
"https://bcr.bazel.build/modules/bazel_skylib/1.7.1/MODULE.bazel": "3120d80c5861aa616222ec015332e5f8d3171e062e3e804a2a0253e1be26e59b",
"https://bcr.bazel.build/modules/bazel_skylib/1.7.1/source.json": "f121b43eeefc7c29efbd51b83d08631e2347297c95aac9764a701f2a6a2bb953",
"https://bcr.bazel.build/modules/buildozer/7.1.2/MODULE.bazel": "2e8dd40ede9c454042645fd8d8d0cd1527966aa5c919de86661e62953cd73d84",
"https://bcr.bazel.build/modules/buildozer/7.1.2/source.json": "c9028a501d2db85793a6996205c8de120944f50a0d570438fcae0457a5f9d1f8",
"https://bcr.bazel.build/modules/gazelle/0.32.0/MODULE.bazel": "b499f58a5d0d3537f3cf5b76d8ada18242f64ec474d8391247438bf04f58c7b8",
"https://bcr.bazel.build/modules/gazelle/0.33.0/MODULE.bazel": "a13a0f279b462b784fb8dd52a4074526c4a2afe70e114c7d09066097a46b3350",
"https://bcr.bazel.build/modules/gazelle/0.34.0/MODULE.bazel": "abdd8ce4d70978933209db92e436deb3a8b737859e9354fb5fd11fb5c2004c8a",
"https://bcr.bazel.build/modules/gazelle/0.36.0/MODULE.bazel": "e375d5d6e9a6ca59b0cb38b0540bc9a05b6aa926d322f2de268ad267a2ee74c0",
"https://bcr.bazel.build/modules/gazelle/0.44.0/MODULE.bazel": "fd3177ca0938da57a1e416cad3f39b9c4334defbc717e89aba9d9ddbbb0341da",
"https://bcr.bazel.build/modules/gazelle/0.44.0/source.json": "7fb65ef9c1ce470d099ca27fd478673d9d64c844af28d0d472b0874c7d590cb6",
"https://bcr.bazel.build/modules/google_benchmark/1.8.2/MODULE.bazel": "a70cf1bba851000ba93b58ae2f6d76490a9feb74192e57ab8e8ff13c34ec50cb",
"https://bcr.bazel.build/modules/googletest/1.11.0/MODULE.bazel": "3a83f095183f66345ca86aa13c58b59f9f94a2f81999c093d4eeaa2d262d12f4",
"https://bcr.bazel.build/modules/googletest/1.14.0.bcr.1/MODULE.bazel": "22c31a561553727960057361aa33bf20fb2e98584bc4fec007906e27053f80c6",
"https://bcr.bazel.build/modules/googletest/1.14.0.bcr.1/source.json": "41e9e129f80d8c8bf103a7acc337b76e54fad1214ac0a7084bf24f4cd924b8b4",
"https://bcr.bazel.build/modules/googletest/1.14.0/MODULE.bazel": "cfbcbf3e6eac06ef9d85900f64424708cc08687d1b527f0ef65aa7517af8118f",
"https://bcr.bazel.build/modules/jsoncpp/1.9.5/MODULE.bazel": "31271aedc59e815656f5736f282bb7509a97c7ecb43e927ac1a37966e0578075",
"https://bcr.bazel.build/modules/jsoncpp/1.9.5/source.json": "4108ee5085dd2885a341c7fab149429db457b3169b86eb081fa245eadf69169d",
"https://bcr.bazel.build/modules/libpfm/4.11.0/MODULE.bazel": "45061ff025b301940f1e30d2c16bea596c25b176c8b6b3087e92615adbd52902",
"https://bcr.bazel.build/modules/package_metadata/0.0.2/MODULE.bazel": "fb8d25550742674d63d7b250063d4580ca530499f045d70748b1b142081ebb92",
"https://bcr.bazel.build/modules/package_metadata/0.0.2/source.json": "e53a759a72488d2c0576f57491ef2da0cf4aab05ac0997314012495935531b73",
"https://bcr.bazel.build/modules/platforms/0.0.10/MODULE.bazel": "8cb8efaf200bdeb2150d93e162c40f388529a25852b332cec879373771e48ed5",
"https://bcr.bazel.build/modules/platforms/0.0.11/MODULE.bazel": "0daefc49732e227caa8bfa834d65dc52e8cc18a2faf80df25e8caea151a9413f",
"https://bcr.bazel.build/modules/platforms/0.0.11/source.json": "f7e188b79ebedebfe75e9e1d098b8845226c7992b307e28e1496f23112e8fc29",
"https://bcr.bazel.build/modules/platforms/0.0.4/MODULE.bazel": "9b328e31ee156f53f3c416a64f8491f7eb731742655a47c9eec4703a71644aee",
"https://bcr.bazel.build/modules/platforms/0.0.5/MODULE.bazel": "5733b54ea419d5eaf7997054bb55f6a1d0b5ff8aedf0176fef9eea44f3acda37",
"https://bcr.bazel.build/modules/platforms/0.0.6/MODULE.bazel": "ad6eeef431dc52aefd2d77ed20a4b353f8ebf0f4ecdd26a807d2da5aa8cd0615",
"https://bcr.bazel.build/modules/platforms/0.0.7/MODULE.bazel": "72fd4a0ede9ee5c021f6a8dd92b503e089f46c227ba2813ff183b71616034814",
"https://bcr.bazel.build/modules/platforms/0.0.8/MODULE.bazel": "9f142c03e348f6d263719f5074b21ef3adf0b139ee4c5133e2aa35664da9eb2d",
"https://bcr.bazel.build/modules/protobuf/21.7/MODULE.bazel": "a5a29bb89544f9b97edce05642fac225a808b5b7be74038ea3640fae2f8e66a7",
"https://bcr.bazel.build/modules/protobuf/27.0/MODULE.bazel": "7873b60be88844a0a1d8f80b9d5d20cfbd8495a689b8763e76c6372998d3f64c",
"https://bcr.bazel.build/modules/protobuf/27.1/MODULE.bazel": "703a7b614728bb06647f965264967a8ef1c39e09e8f167b3ca0bb1fd80449c0d",
"https://bcr.bazel.build/modules/protobuf/29.0-rc2.bcr.1/MODULE.bazel": "52f4126f63a2f0bbf36b99c2a87648f08467a4eaf92ba726bc7d6a500bbf770c",
"https://bcr.bazel.build/modules/protobuf/29.0-rc2/MODULE.bazel": "6241d35983510143049943fc0d57937937122baf1b287862f9dc8590fc4c37df",
"https://bcr.bazel.build/modules/protobuf/29.0/MODULE.bazel": "319dc8bf4c679ff87e71b1ccfb5a6e90a6dbc4693501d471f48662ac46d04e4e",
"https://bcr.bazel.build/modules/protobuf/29.0/source.json": "b857f93c796750eef95f0d61ee378f3420d00ee1dd38627b27193aa482f4f981",
"https://bcr.bazel.build/modules/protobuf/3.19.0/MODULE.bazel": "6b5fbb433f760a99a22b18b6850ed5784ef0e9928a72668b66e4d7ccd47db9b0",
"https://bcr.bazel.build/modules/protobuf/3.19.2/MODULE.bazel": "532ffe5f2186b69fdde039efe6df13ba726ff338c6bc82275ad433013fa10573",
"https://bcr.bazel.build/modules/protobuf/3.19.6/MODULE.bazel": "9233edc5e1f2ee276a60de3eaa47ac4132302ef9643238f23128fea53ea12858",
"https://bcr.bazel.build/modules/pybind11_bazel/2.11.1/MODULE.bazel": "88af1c246226d87e65be78ed49ecd1e6f5e98648558c14ce99176da041dc378e",
"https://bcr.bazel.build/modules/pybind11_bazel/2.11.1/source.json": "be4789e951dd5301282729fe3d4938995dc4c1a81c2ff150afc9f1b0504c6022",
"https://bcr.bazel.build/modules/re2/2023-09-01/MODULE.bazel": "cb3d511531b16cfc78a225a9e2136007a48cf8a677e4264baeab57fe78a80206",
"https://bcr.bazel.build/modules/re2/2023-09-01/source.json": "e044ce89c2883cd957a2969a43e79f7752f9656f6b20050b62f90ede21ec6eb4",
"https://bcr.bazel.build/modules/rules_android/0.1.1/MODULE.bazel": "48809ab0091b07ad0182defb787c4c5328bd3a278938415c00a7b69b50c4d3a8",
"https://bcr.bazel.build/modules/rules_android/0.1.1/source.json": "e6986b41626ee10bdc864937ffb6d6bf275bb5b9c65120e6137d56e6331f089e",
"https://bcr.bazel.build/modules/rules_cc/0.0.1/MODULE.bazel": "cb2aa0747f84c6c3a78dad4e2049c154f08ab9d166b1273835a8174940365647",
"https://bcr.bazel.build/modules/rules_cc/0.0.10/MODULE.bazel": "ec1705118f7eaedd6e118508d3d26deba2a4e76476ada7e0e3965211be012002",
"https://bcr.bazel.build/modules/rules_cc/0.0.13/MODULE.bazel": "0e8529ed7b323dad0775ff924d2ae5af7640b23553dfcd4d34344c7e7a867191",
"https://bcr.bazel.build/modules/rules_cc/0.0.14/MODULE.bazel": "5e343a3aac88b8d7af3b1b6d2093b55c347b8eefc2e7d1442f7a02dc8fea48ac",
"https://bcr.bazel.build/modules/rules_cc/0.0.15/MODULE.bazel": "6704c35f7b4a72502ee81f61bf88706b54f06b3cbe5558ac17e2e14666cd5dcc",
"https://bcr.bazel.build/modules/rules_cc/0.0.16/MODULE.bazel": "7661303b8fc1b4d7f532e54e9d6565771fea666fbdf839e0a86affcd02defe87",
"https://bcr.bazel.build/modules/rules_cc/0.0.2/MODULE.bazel": "6915987c90970493ab97393024c156ea8fb9f3bea953b2f3ec05c34f19b5695c",
"https://bcr.bazel.build/modules/rules_cc/0.0.6/MODULE.bazel": "abf360251023dfe3efcef65ab9d56beefa8394d4176dd29529750e1c57eaa33f",
"https://bcr.bazel.build/modules/rules_cc/0.0.8/MODULE.bazel": "964c85c82cfeb6f3855e6a07054fdb159aced38e99a5eecf7bce9d53990afa3e",
"https://bcr.bazel.build/modules/rules_cc/0.0.9/MODULE.bazel": "836e76439f354b89afe6a911a7adf59a6b2518fafb174483ad78a2a2fde7b1c5",
"https://bcr.bazel.build/modules/rules_cc/0.1.1/MODULE.bazel": "2f0222a6f229f0bf44cd711dc13c858dad98c62d52bd51d8fc3a764a83125513",
"https://bcr.bazel.build/modules/rules_cc/0.1.1/source.json": "d61627377bd7dd1da4652063e368d9366fc9a73920bfa396798ad92172cf645c",
"https://bcr.bazel.build/modules/rules_foreign_cc/0.9.0/MODULE.bazel": "c9e8c682bf75b0e7c704166d79b599f93b72cfca5ad7477df596947891feeef6",
"https://bcr.bazel.build/modules/rules_fuzzing/0.5.2/MODULE.bazel": "40c97d1144356f52905566c55811f13b299453a14ac7769dfba2ac38192337a8",
"https://bcr.bazel.build/modules/rules_fuzzing/0.5.2/source.json": "c8b1e2c717646f1702290959a3302a178fb639d987ab61d548105019f11e527e",
"https://bcr.bazel.build/modules/rules_go/0.41.0/MODULE.bazel": "55861d8e8bb0e62cbd2896f60ff303f62ffcb0eddb74ecb0e5c0cbe36fc292c8",
"https://bcr.bazel.build/modules/rules_go/0.42.0/MODULE.bazel": "8cfa875b9aa8c6fce2b2e5925e73c1388173ea3c32a0db4d2b4804b453c14270",
"https://bcr.bazel.build/modules/rules_go/0.46.0/MODULE.bazel": "3477df8bdcc49e698b9d25f734c4f3a9f5931ff34ee48a2c662be168f5f2d3fd",
"https://bcr.bazel.build/modules/rules_go/0.51.0/MODULE.bazel": "b6920f505935bfd69381651c942496d99b16e2a12f3dd5263b90ded16f3b4d0f",
"https://bcr.bazel.build/modules/rules_go/0.55.1/MODULE.bazel": "a57a6fc59a74326c0b440d07cca209edf13c7d1a641e48cfbeab56e79f873609",
"https://bcr.bazel.build/modules/rules_go/0.55.1/source.json": "827a740c8959c9d20616889e7746cde4dcc6ee80d25146943627ccea0736328f",
"https://bcr.bazel.build/modules/rules_java/4.0.0/MODULE.bazel": "5a78a7ae82cd1a33cef56dc578c7d2a46ed0dca12643ee45edbb8417899e6f74",
"https://bcr.bazel.build/modules/rules_java/5.3.5/MODULE.bazel": "a4ec4f2db570171e3e5eb753276ee4b389bae16b96207e9d3230895c99644b86",
"https://bcr.bazel.build/modules/rules_java/6.0.0/MODULE.bazel": "8a43b7df601a7ec1af61d79345c17b31ea1fedc6711fd4abfd013ea612978e39",
"https://bcr.bazel.build/modules/rules_java/6.3.0/MODULE.bazel": "a97c7678c19f236a956ad260d59c86e10a463badb7eb2eda787490f4c969b963",
"https://bcr.bazel.build/modules/rules_java/6.4.0/MODULE.bazel": "e986a9fe25aeaa84ac17ca093ef13a4637f6107375f64667a15999f77db6c8f6",
"https://bcr.bazel.build/modules/rules_java/6.5.2/MODULE.bazel": "1d440d262d0e08453fa0c4d8f699ba81609ed0e9a9a0f02cd10b3e7942e61e31",
"https://bcr.bazel.build/modules/rules_java/7.10.0/MODULE.bazel": "530c3beb3067e870561739f1144329a21c851ff771cd752a49e06e3dc9c2e71a",
"https://bcr.bazel.build/modules/rules_java/7.12.2/MODULE.bazel": "579c505165ee757a4280ef83cda0150eea193eed3bef50b1004ba88b99da6de6",
"https://bcr.bazel.build/modules/rules_java/7.2.0/MODULE.bazel": "06c0334c9be61e6cef2c8c84a7800cef502063269a5af25ceb100b192453d4ab",
"https://bcr.bazel.build/modules/rules_java/7.3.2/MODULE.bazel": "50dece891cfdf1741ea230d001aa9c14398062f2b7c066470accace78e412bc2",
"https://bcr.bazel.build/modules/rules_java/7.6.1/MODULE.bazel": "2f14b7e8a1aa2f67ae92bc69d1ec0fa8d9f827c4e17ff5e5f02e91caa3b2d0fe",
"https://bcr.bazel.build/modules/rules_java/8.12.0/MODULE.bazel": "8e6590b961f2defdfc2811c089c75716cb2f06c8a4edeb9a8d85eaa64ee2a761",
"https://bcr.bazel.build/modules/rules_java/8.12.0/source.json": "cbd5d55d9d38d4008a7d00bee5b5a5a4b6031fcd4a56515c9accbcd42c7be2ba",
"https://bcr.bazel.build/modules/rules_jvm_external/4.4.2/MODULE.bazel": "a56b85e418c83eb1839819f0b515c431010160383306d13ec21959ac412d2fe7",
"https://bcr.bazel.build/modules/rules_jvm_external/5.1/MODULE.bazel": "33f6f999e03183f7d088c9be518a63467dfd0be94a11d0055fe2d210f89aa909",
"https://bcr.bazel.build/modules/rules_jvm_external/5.2/MODULE.bazel": "d9351ba35217ad0de03816ef3ed63f89d411349353077348a45348b096615036",
"https://bcr.bazel.build/modules/rules_jvm_external/5.3/MODULE.bazel": "bf93870767689637164657731849fb887ad086739bd5d360d90007a581d5527d",
"https://bcr.bazel.build/modules/rules_jvm_external/6.1/MODULE.bazel": "75b5fec090dbd46cf9b7d8ea08cf84a0472d92ba3585b476f44c326eda8059c4",
"https://bcr.bazel.build/modules/rules_jvm_external/6.3/MODULE.bazel": "c998e060b85f71e00de5ec552019347c8bca255062c990ac02d051bb80a38df0",
"https://bcr.bazel.build/modules/rules_jvm_external/6.3/source.json": "6f5f5a5a4419ae4e37c35a5bb0a6ae657ed40b7abc5a5189111b47fcebe43197",
"https://bcr.bazel.build/modules/rules_kotlin/1.9.0/MODULE.bazel": "ef85697305025e5a61f395d4eaede272a5393cee479ace6686dba707de804d59",
"https://bcr.bazel.build/modules/rules_kotlin/1.9.6/MODULE.bazel": "d269a01a18ee74d0335450b10f62c9ed81f2321d7958a2934e44272fe82dcef3",
"https://bcr.bazel.build/modules/rules_kotlin/1.9.6/source.json": "2faa4794364282db7c06600b7e5e34867a564ae91bda7cae7c29c64e9466b7d5",
"https://bcr.bazel.build/modules/rules_license/0.0.3/MODULE.bazel": "627e9ab0247f7d1e05736b59dbb1b6871373de5ad31c3011880b4133cafd4bd0",
"https://bcr.bazel.build/modules/rules_license/0.0.7/MODULE.bazel": "088fbeb0b6a419005b89cf93fe62d9517c0a2b8bb56af3244af65ecfe37e7d5d",
"https://bcr.bazel.build/modules/rules_license/1.0.0/MODULE.bazel": "a7fda60eefdf3d8c827262ba499957e4df06f659330bbe6cdbdb975b768bb65c",
"https://bcr.bazel.build/modules/rules_license/1.0.0/source.json": "a52c89e54cc311196e478f8382df91c15f7a2bfdf4c6cd0e2675cc2ff0b56efb",
"https://bcr.bazel.build/modules/rules_pkg/0.7.0/MODULE.bazel": "df99f03fc7934a4737122518bb87e667e62d780b610910f0447665a7e2be62dc",
"https://bcr.bazel.build/modules/rules_pkg/1.0.1/MODULE.bazel": "5b1df97dbc29623bccdf2b0dcd0f5cb08e2f2c9050aab1092fd39a41e82686ff",
"https://bcr.bazel.build/modules/rules_pkg/1.0.1/source.json": "bd82e5d7b9ce2d31e380dd9f50c111d678c3bdaca190cb76b0e1c71b05e1ba8a",
"https://bcr.bazel.build/modules/rules_proto/4.0.0/MODULE.bazel": "a7a7b6ce9bee418c1a760b3d84f83a299ad6952f9903c67f19e4edd964894e06",
"https://bcr.bazel.build/modules/rules_proto/5.3.0-21.7/MODULE.bazel": "e8dff86b0971688790ae75528fe1813f71809b5afd57facb44dad9e8eca631b7",
"https://bcr.bazel.build/modules/rules_proto/6.0.0/MODULE.bazel": "b531d7f09f58dce456cd61b4579ce8c86b38544da75184eadaf0a7cb7966453f",
"https://bcr.bazel.build/modules/rules_proto/6.0.2/MODULE.bazel": "ce916b775a62b90b61888052a416ccdda405212b6aaeb39522f7dc53431a5e73",
"https://bcr.bazel.build/modules/rules_proto/7.0.2/MODULE.bazel": "bf81793bd6d2ad89a37a40693e56c61b0ee30f7a7fdbaf3eabbf5f39de47dea2",
"https://bcr.bazel.build/modules/rules_proto/7.0.2/source.json": "1e5e7260ae32ef4f2b52fd1d0de8d03b606a44c91b694d2f1afb1d3b28a48ce1",
"https://bcr.bazel.build/modules/rules_python/0.10.2/MODULE.bazel": "cc82bc96f2997baa545ab3ce73f196d040ffb8756fd2d66125a530031cd90e5f",
"https://bcr.bazel.build/modules/rules_python/0.23.1/MODULE.bazel": "49ffccf0511cb8414de28321f5fcf2a31312b47c40cc21577144b7447f2bf300",
"https://bcr.bazel.build/modules/rules_python/0.25.0/MODULE.bazel": "72f1506841c920a1afec76975b35312410eea3aa7b63267436bfb1dd91d2d382",
"https://bcr.bazel.build/modules/rules_python/0.28.0/MODULE.bazel": "cba2573d870babc976664a912539b320cbaa7114cd3e8f053c720171cde331ed",
"https://bcr.bazel.build/modules/rules_python/0.31.0/MODULE.bazel": "93a43dc47ee570e6ec9f5779b2e64c1476a6ce921c48cc9a1678a91dd5f8fd58",
"https://bcr.bazel.build/modules/rules_python/0.4.0/MODULE.bazel": "9208ee05fd48bf09ac60ed269791cf17fb343db56c8226a720fbb1cdf467166c",
"https://bcr.bazel.build/modules/rules_python/0.40.0/MODULE.bazel": "9d1a3cd88ed7d8e39583d9ffe56ae8a244f67783ae89b60caafc9f5cf318ada7",
"https://bcr.bazel.build/modules/rules_python/0.40.0/source.json": "939d4bd2e3110f27bfb360292986bb79fd8dcefb874358ccd6cdaa7bda029320",
"https://bcr.bazel.build/modules/rules_shell/0.2.0/MODULE.bazel": "fda8a652ab3c7d8fee214de05e7a9916d8b28082234e8d2c0094505c5268ed3c",
"https://bcr.bazel.build/modules/rules_shell/0.3.0/MODULE.bazel": "de4402cd12f4cc8fda2354fce179fdb068c0b9ca1ec2d2b17b3e21b24c1a937b",
"https://bcr.bazel.build/modules/rules_shell/0.3.0/source.json": "c55ed591aa5009401ddf80ded9762ac32c358d2517ee7820be981e2de9756cf3",
"https://bcr.bazel.build/modules/stardoc/0.5.1/MODULE.bazel": "1a05d92974d0c122f5ccf09291442580317cdd859f07a8655f1db9a60374f9f8",
"https://bcr.bazel.build/modules/stardoc/0.5.3/MODULE.bazel": "c7f6948dae6999bf0db32c1858ae345f112cacf98f174c7a8bb707e41b974f1c",
"https://bcr.bazel.build/modules/stardoc/0.5.6/MODULE.bazel": "c43dabc564990eeab55e25ed61c07a1aadafe9ece96a4efabb3f8bf9063b71ef",
"https://bcr.bazel.build/modules/stardoc/0.6.2/MODULE.bazel": "7060193196395f5dd668eda046ccbeacebfd98efc77fed418dbe2b82ffaa39fd",
"https://bcr.bazel.build/modules/stardoc/0.7.0/MODULE.bazel": "05e3d6d30c099b6770e97da986c53bd31844d7f13d41412480ea265ac9e8079c",
"https://bcr.bazel.build/modules/stardoc/0.7.1/MODULE.bazel": "3548faea4ee5dda5580f9af150e79d0f6aea934fc60c1cc50f4efdd9420759e7",
"https://bcr.bazel.build/modules/stardoc/0.7.1/source.json": "b6500ffcd7b48cd72c29bb67bcac781e12701cc0d6d55d266a652583cfcdab01",
"https://bcr.bazel.build/modules/upb/0.0.0-20220923-a547704/MODULE.bazel": "7298990c00040a0e2f121f6c32544bab27d4452f80d9ce51349b1a28f3005c43",
"https://bcr.bazel.build/modules/zlib/1.2.11/MODULE.bazel": "07b389abc85fdbca459b69e2ec656ae5622873af3f845e1c9d80fe179f3effa0",
"https://bcr.bazel.build/modules/zlib/1.2.12/MODULE.bazel": "3b1a8834ada2a883674be8cbd36ede1b6ec481477ada359cd2d3ddc562340b27",
"https://bcr.bazel.build/modules/zlib/1.3.1.bcr.5/MODULE.bazel": "eec517b5bbe5492629466e11dae908d043364302283de25581e3eb944326c4ca",
"https://bcr.bazel.build/modules/zlib/1.3.1.bcr.5/source.json": "22bc55c47af97246cfc093d0acf683a7869377de362b5d1c552c2c2e16b7a806",
"https://bcr.bazel.build/modules/zlib/1.3.1/MODULE.bazel": "751c9940dcfe869f5f7274e1295422a34623555916eb98c174c1e945594bf198"
},
"selectedYankedVersions": {},
"moduleExtensions": {
"@@rules_kotlin+//src/main/starlark/core/repositories:bzlmod_setup.bzl%rules_kotlin_extensions": {
"general": {
"bzlTransitiveDigest": "hUTp2w+RUVdL7ma5esCXZJAFnX7vLbVfLd7FwnQI6bU=",
"usagesDigest": "QI2z8ZUR+mqtbwsf2fLqYdJAkPOHdOV+tF2yVAUgRzw=",
"recordedFileInputs": {},
"recordedDirentsInputs": {},
"envVariables": {},
"generatedRepoSpecs": {
"com_github_jetbrains_kotlin_git": {
"repoRuleId": "@@rules_kotlin+//src/main/starlark/core/repositories:compiler.bzl%kotlin_compiler_git_repository",
"attributes": {
"urls": [
"https://github.com/JetBrains/kotlin/releases/download/v1.9.23/kotlin-compiler-1.9.23.zip"
],
"sha256": "93137d3aab9afa9b27cb06a824c2324195c6b6f6179d8a8653f440f5bd58be88"
}
},
"com_github_jetbrains_kotlin": {
"repoRuleId": "@@rules_kotlin+//src/main/starlark/core/repositories:compiler.bzl%kotlin_capabilities_repository",
"attributes": {
"git_repository_name": "com_github_jetbrains_kotlin_git",
"compiler_version": "1.9.23"
}
},
"com_github_google_ksp": {
"repoRuleId": "@@rules_kotlin+//src/main/starlark/core/repositories:ksp.bzl%ksp_compiler_plugin_repository",
"attributes": {
"urls": [
"https://github.com/google/ksp/releases/download/1.9.23-1.0.20/artifacts.zip"
],
"sha256": "ee0618755913ef7fd6511288a232e8fad24838b9af6ea73972a76e81053c8c2d",
"strip_version": "1.9.23-1.0.20"
}
},
"com_github_pinterest_ktlint": {
"repoRuleId": "@@bazel_tools//tools/build_defs/repo:http.bzl%http_file",
"attributes": {
"sha256": "01b2e0ef893383a50dbeb13970fe7fa3be36ca3e83259e01649945b09d736985",
"urls": [
"https://github.com/pinterest/ktlint/releases/download/1.3.0/ktlint"
],
"executable": true
}
},
"rules_android": {
"repoRuleId": "@@bazel_tools//tools/build_defs/repo:http.bzl%http_archive",
"attributes": {
"sha256": "cd06d15dd8bb59926e4d65f9003bfc20f9da4b2519985c27e190cddc8b7a7806",
"strip_prefix": "rules_android-0.1.1",
"urls": [
"https://github.com/bazelbuild/rules_android/archive/v0.1.1.zip"
]
}
}
},
"recordedRepoMappingEntries": [
[
"rules_kotlin+",
"bazel_tools",
"bazel_tools"
]
]
}
}
}
}

98
vendor/github.com/lestrrat-go/jwx/v3/Makefile generated vendored Normal file
View File

@@ -0,0 +1,98 @@
# Makefile for github.com/lestrrat-go/jwx/v3 (vendored).
# Test/cover/smoke targets fan out over the module's optional build tags:
#   jwx_goccy, jwx_es256k, jwx_secp256k1_pem, jwx_asmbase64
.PHONY: generate realclean cover viewcover test lint check_diffs imports tidy jwx

generate:
	@go generate
	@$(MAKE) generate-jwa generate-jwe generate-jwk generate-jws generate-jwt
	@./tools/cmd/gofmt.sh

# Pattern target: `make generate-jwk` runs `go generate` in ./jwk, etc.
generate-%:
	@go generate $(shell pwd -P)/$(patsubst generate-%,%,$@)

realclean:
	rm coverage.out

# All test/cover/smoke variants delegate to tools/test.sh, varying only
# TESTOPTS (the build-tag selection) and MODE.
test-cmd:
	env TESTOPTS="$(TESTOPTS)" ./tools/test.sh

test:
	$(MAKE) test-stdlib TESTOPTS=

test-stdlib:
	$(MAKE) test-cmd TESTOPTS=

test-goccy:
	$(MAKE) test-cmd TESTOPTS="-tags jwx_goccy"

test-es256k:
	$(MAKE) test-cmd TESTOPTS="-tags jwx_es256k"

test-secp256k1-pem:
	$(MAKE) test-cmd TESTOPTS="-tags jwx_es256k,jwx_secp256k1_pem"

test-asmbase64:
	$(MAKE) test-cmd TESTOPTS="-tags jwx_asmbase64"

test-alltags:
	$(MAKE) test-cmd TESTOPTS="-tags jwx_asmbase64,jwx_goccy,jwx_es256k,jwx_secp256k1_pem"

cover-cmd:
	env MODE=cover ./tools/test.sh

cover:
	$(MAKE) cover-stdlib

cover-stdlib:
	$(MAKE) cover-cmd TESTOPTS=

cover-goccy:
	$(MAKE) cover-cmd TESTOPTS="-tags jwx_goccy"

cover-es256k:
	$(MAKE) cover-cmd TESTOPTS="-tags jwx_es256k"

cover-secp256k1-pem:
	# Fixed: previously passed "jwx_secp256k1", which is not the tag the
	# codebase checks — the PEM code path is guarded by "jwx_secp256k1_pem"
	# (see test-secp256k1-pem and the *-alltags targets), so coverage ran
	# without the code it was meant to cover.
	$(MAKE) cover-cmd TESTOPTS="-tags jwx_es256k,jwx_secp256k1_pem"

cover-asmbase64:
	$(MAKE) cover-cmd TESTOPTS="-tags jwx_asmbase64"

cover-alltags:
	$(MAKE) cover-cmd TESTOPTS="-tags jwx_asmbase64,jwx_goccy,jwx_es256k,jwx_secp256k1_pem"

smoke-cmd:
	env MODE=short ./tools/test.sh

smoke:
	$(MAKE) smoke-stdlib

smoke-stdlib:
	$(MAKE) smoke-cmd TESTOPTS=

smoke-goccy:
	$(MAKE) smoke-cmd TESTOPTS="-tags jwx_goccy"

smoke-es256k:
	$(MAKE) smoke-cmd TESTOPTS="-tags jwx_es256k"

smoke-secp256k1-pem:
	$(MAKE) smoke-cmd TESTOPTS="-tags jwx_es256k,jwx_secp256k1_pem"

# NOTE(review): unlike test-alltags/cover-alltags, smoke-alltags omits
# jwx_asmbase64 — confirm upstream whether this is intentional.
smoke-alltags:
	$(MAKE) smoke-cmd TESTOPTS="-tags jwx_goccy,jwx_es256k,jwx_secp256k1_pem"

viewcover:
	go tool cover -html=coverage.out

lint:
	golangci-lint run ./...

check_diffs:
	./scripts/check-diff.sh

imports:
	goimports -w ./

tidy:
	./scripts/tidy.sh

jwx:
	@./tools/cmd/install-jwx.sh

263
vendor/github.com/lestrrat-go/jwx/v3/README.md generated vendored Normal file
View File

@@ -0,0 +1,263 @@
# github.com/lestrrat-go/jwx/v3 [![CI](https://github.com/lestrrat-go/jwx/actions/workflows/ci.yml/badge.svg)](https://github.com/lestrrat-go/jwx/actions/workflows/ci.yml) [![Go Reference](https://pkg.go.dev/badge/github.com/lestrrat-go/jwx/v3.svg)](https://pkg.go.dev/github.com/lestrrat-go/jwx/v3) [![codecov.io](https://codecov.io/github/lestrrat-go/jwx/coverage.svg?branch=v3)](https://codecov.io/github/lestrrat-go/jwx?branch=v3)
Go module implementing various JWx (JWA/JWE/JWK/JWS/JWT, otherwise known as JOSE) technologies.
If you are using this module in your product or your company, please add your product and/or company name in the [Wiki](https://github.com/lestrrat-go/jwx/wiki/Users)! It really helps keeping up our motivation.
# Features
* Complete coverage of JWA/JWE/JWK/JWS/JWT, not just JWT+minimum tool set.
* Supports JWS messages with multiple signatures, both compact and JSON serialization
* Supports JWS with detached payload
* Supports JWS with unencoded payload (RFC7797)
* Supports JWE messages with multiple recipients, both compact and JSON serialization
* Most operations work with either JWK or raw keys e.g. *rsa.PrivateKey, *ecdsa.PrivateKey, etc).
* Opinionated, but very uniform API. Everything is symmetric, and follows a standard convention
* jws.Parse/Verify/Sign
* jwe.Parse/Encrypt/Decrypt
* Arguments are organized as explicit required parameters and optional WithXXXX() style options.
* Extra utilities
* `jwk.Cache` to always keep a JWKS up-to-date
* [bazel](https://bazel.build)-ready
Some more in-depth discussion on why you might want to use this library over others
can be found in the [Description section](#description)
If you are using v0 or v1, you are strongly encouraged to migrate to using v3
(the version that comes with the README you are reading).
# SYNOPSIS
<!-- INCLUDE(examples/jwx_readme_example_test.go) -->
```go
package examples_test
import (
"bytes"
"fmt"
"net/http"
"time"
"github.com/lestrrat-go/jwx/v3/jwa"
"github.com/lestrrat-go/jwx/v3/jwe"
"github.com/lestrrat-go/jwx/v3/jwk"
"github.com/lestrrat-go/jwx/v3/jws"
"github.com/lestrrat-go/jwx/v3/jwt"
)
func Example() {
// Parse, serialize, slice and dice JWKs!
privkey, err := jwk.ParseKey(jsonRSAPrivateKey)
if err != nil {
fmt.Printf("failed to parse JWK: %s\n", err)
return
}
pubkey, err := jwk.PublicKeyOf(privkey)
if err != nil {
fmt.Printf("failed to get public key: %s\n", err)
return
}
// Work with JWTs!
{
// Build a JWT!
tok, err := jwt.NewBuilder().
Issuer(`github.com/lestrrat-go/jwx`).
IssuedAt(time.Now()).
Build()
if err != nil {
fmt.Printf("failed to build token: %s\n", err)
return
}
// Sign a JWT!
signed, err := jwt.Sign(tok, jwt.WithKey(jwa.RS256(), privkey))
if err != nil {
fmt.Printf("failed to sign token: %s\n", err)
return
}
// Verify a JWT!
{
verifiedToken, err := jwt.Parse(signed, jwt.WithKey(jwa.RS256(), pubkey))
if err != nil {
fmt.Printf("failed to verify JWS: %s\n", err)
return
}
_ = verifiedToken
}
// Work with *http.Request!
{
req, err := http.NewRequest(http.MethodGet, `https://github.com/lestrrat-go/jwx`, nil)
req.Header.Set(`Authorization`, fmt.Sprintf(`Bearer %s`, signed))
verifiedToken, err := jwt.ParseRequest(req, jwt.WithKey(jwa.RS256(), pubkey))
if err != nil {
fmt.Printf("failed to verify token from HTTP request: %s\n", err)
return
}
_ = verifiedToken
}
}
// Encrypt and Decrypt arbitrary payload with JWE!
{
encrypted, err := jwe.Encrypt(payloadLoremIpsum, jwe.WithKey(jwa.RSA_OAEP(), jwkRSAPublicKey))
if err != nil {
fmt.Printf("failed to encrypt payload: %s\n", err)
return
}
decrypted, err := jwe.Decrypt(encrypted, jwe.WithKey(jwa.RSA_OAEP(), jwkRSAPrivateKey))
if err != nil {
fmt.Printf("failed to decrypt payload: %s\n", err)
return
}
if !bytes.Equal(decrypted, payloadLoremIpsum) {
fmt.Printf("verified payload did not match\n")
return
}
}
// Sign and Verify arbitrary payload with JWS!
{
signed, err := jws.Sign(payloadLoremIpsum, jws.WithKey(jwa.RS256(), jwkRSAPrivateKey))
if err != nil {
fmt.Printf("failed to sign payload: %s\n", err)
return
}
verified, err := jws.Verify(signed, jws.WithKey(jwa.RS256(), jwkRSAPublicKey))
if err != nil {
fmt.Printf("failed to verify payload: %s\n", err)
return
}
if !bytes.Equal(verified, payloadLoremIpsum) {
fmt.Printf("verified payload did not match\n")
return
}
}
// OUTPUT:
}
```
source: [examples/jwx_readme_example_test.go](https://github.com/lestrrat-go/jwx/blob/v3/examples/jwx_readme_example_test.go)
<!-- END INCLUDE -->
# How-to Documentation
* [API documentation](https://pkg.go.dev/github.com/lestrrat-go/jwx/v3)
* [How-to style documentation](./docs)
* [Runnable Examples](./examples)
# Description
This Go module implements JWA, JWE, JWK, JWS, and JWT. Please see the following table for the list of
available packages:
| Package name | Notes |
|-----------------------------------------------------------|-------------------------------------------------|
| [jwt](https://github.com/lestrrat-go/jwx/tree/v3/jwt) | [RFC 7519](https://tools.ietf.org/html/rfc7519) |
| [jwk](https://github.com/lestrrat-go/jwx/tree/v3/jwk) | [RFC 7517](https://tools.ietf.org/html/rfc7517) + [RFC 7638](https://tools.ietf.org/html/rfc7638) |
| [jwa](https://github.com/lestrrat-go/jwx/tree/v3/jwa) | [RFC 7518](https://tools.ietf.org/html/rfc7518) |
| [jws](https://github.com/lestrrat-go/jwx/tree/v3/jws) | [RFC 7515](https://tools.ietf.org/html/rfc7515) + [RFC 7797](https://tools.ietf.org/html/rfc7797) |
| [jwe](https://github.com/lestrrat-go/jwx/tree/v3/jwe) | [RFC 7516](https://tools.ietf.org/html/rfc7516) |
## History
My goal was to write a server that heavily uses JWK and JWT. At first glance
the libraries that already exist seemed sufficient, but soon I realized that
1. To completely implement the protocols, I needed the entire JWT, JWK, JWS, JWE (and JWA, by necessity).
2. Most of the libraries that existed only deal with a subset of the various JWx specifications that were necessary to implement their specific needs
For example, a certain library looks like it had most of JWS, JWE, JWK covered, but then it lacked the ability to include private claims in its JWT responses. Another library had support of all the private claims, but completely lacked in its flexibility to generate various different response formats.
Because I was writing the server side (and the client side for testing), I needed the *entire* JOSE toolset to properly implement my server, **and** they needed to be *flexible* enough to fulfill the entire spec that I was writing.
So here's `github.com/lestrrat-go/jwx/v3`. This library is extensible, customizable, and hopefully well organized to the point that it is easy for you to slice and dice it.
## Why would I use this library?
There are several other major Go modules that handle JWT and related data formats,
so why should you use this library?
From a purely functional perspective, the only major difference is this:
Whereas most other projects only deal with what they seem necessary to handle
JWTs, this module handles the **_entire_** spectrum of JWS, JWE, JWK, and JWT.
That is, if you need to not only parse JWTs, but also to control JWKs, or
if you need to handle payloads that are NOT JWTs, you should probably consider
using this module. You should also note that JWT is built _on top_ of those
other technologies. You simply cannot have a complete JWT package without
implementing the entirety of JWS/JWE/JWK, which this library does.
Next, from an implementation perspective, this module differs significantly
from others in that it tries very hard to expose only the APIs, and not the
internal data. For example, individual JWT claims are not accessible through
struct field lookups. You need to use one of the getter methods.
This is because this library takes the stance that the end user is fully capable
and even willing to shoot themselves in the foot when presented with a lax
API. By making sure that users do not have access to open structs, we can protect
users from doing silly things like creating _incomplete_ structs, or access the
structs concurrently without any protection. This structure also allows
us to put extra smarts in the structs, such as doing the right thing when
you want to parse / write custom fields (this module does not require the user
to specify alternate structs to parse objects with custom fields)
In the end I think it comes down to your usage pattern, and priorities.
Some general guidelines that come to mind are:
* If you want a single library to handle everything JWx, such as using JWE, JWK, JWS, handling [auto-refreshing JWKs](https://github.com/lestrrat-go/jwx/blob/v3/docs/04-jwk.md#auto-refreshing-remote-keys), use this module.
* If you want to honor all possible custom fields transparently, use this module.
* If you want a standardized clean API, use this module.
Otherwise, feel free to choose something else.
# Contributions
## Issues
For bug reports and feature requests, please try to follow the issue templates as much as possible.
For either bug reports or feature requests, failing tests are even better.
## Pull Requests
Please make sure to include tests that exercise the changes you made.
If you are editing auto-generated files (those files with the `_gen.go` suffix), please make sure that you do the following:
1. Edit the generator, not the generated files (e.g. internal/cmd/genreadfile/main.go)
2. Run `make generate` (or `go generate`) to generate the new code
3. Commit _both_ the generator _and_ the generated files
## Discussions / Usage
Please try [discussions](https://github.com/lestrrat-go/jwx/tree/v3/discussions) first.
# Related Modules
* [github.com/lestrrat-go/echo-middleware-jwx](https://github.com/lestrrat-go/echo-middleware-jwx) - Sample Echo middleware
* [github.com/jwx-go/crypto-signer/gcp](https://github.com/jwx-go/crypto-signer/tree/main/gcp) - GCP KMS wrapper that implements [`crypto.Signer`](https://pkg.go.dev/crypto#Signer)
* [github.com/jwx-go/crypto-signer/aws](https://github.com/jwx-go/crypto-signer/tree/main/aws) - AWS KMS wrapper that implements [`crypto.Signer`](https://pkg.go.dev/crypto#Signer)
# Credits
* Initial work on this library was generously sponsored by HDE Inc (https://www.hde.co.jp)
* Lots of code, especially JWE was initially taken from go-jose library (https://github.com/square/go-jose)
* Lots of individual contributors have helped this project over the years. Thank each and every one of you very much.
# Quid pro quo
If you use this software to build products in a for-profit organization, we ask you to _consider_
contributing back to FOSS in the following manner:
* For every 100 employees (direct hires) of your organization, please consider contributing minimum of $1 every year to either this project, **or** another FOSS projects that this project uses. For example, for 100 employees, we ask you contribute $100 yearly; for 10,000 employees, we ask you contribute $10,000 yearly.
* If possible, please make this information public. You do not need to disclose the amount you are contributing, but please make the information that you are contributing to particular FOSS projects public. For this project, please consider writing your name on the [Wiki](https://github.com/lestrrat-go/jwx/wiki/Users)
This is _NOT_ a licensing term: you are still free to use this software according to the license it
comes with. This clause is only a plea for people to acknowledge the work from FOSS developers whose
work you rely on each and every day.

18
vendor/github.com/lestrrat-go/jwx/v3/SECURITY.md generated vendored Normal file
View File

@@ -0,0 +1,18 @@
# Security Policy
## Supported Versions
Most recent two major versions will receive security updates
| Version | Supported |
| -------- | ------------------ |
| v3.x.x | :white_check_mark: |
| v2.x.x | :white_check_mark: |
| < v2.0.0 | :x: |
## Reporting a Vulnerability
If you think you found a vulnerability, please report it via [GitHub Security Advisory](https://github.com/lestrrat-go/jwx/security/advisories/new).
Please include explicit steps to reproduce the security issue.
We will do our best to respond in a timely manner, but please also be aware that this project is maintained by a very limited number of people. Please help us with test code and such.

2
vendor/github.com/lestrrat-go/jwx/v3/WORKSPACE generated vendored Normal file
View File

@@ -0,0 +1,2 @@
# Empty WORKSPACE file for bzlmod compatibility
# All dependencies are now managed in MODULE.bazel

34
vendor/github.com/lestrrat-go/jwx/v3/cert/BUILD.bazel generated vendored Normal file
View File

@@ -0,0 +1,34 @@
# Bazel build rules for the cert package (x5c certificate chain helpers).
load("@rules_go//go:def.bzl", "go_library", "go_test")

go_library(
    name = "cert",
    srcs = [
        "cert.go",
        "chain.go",
    ],
    importpath = "github.com/lestrrat-go/jwx/v3/cert",
    visibility = ["//visibility:public"],
    deps = [
        "//internal/base64",
        "//internal/tokens",
    ],
)

go_test(
    name = "cert_test",
    srcs = [
        "cert_test.go",
        "chain_test.go",
    ],
    deps = [
        ":cert",
        "//internal/jwxtest",
        "@com_github_stretchr_testify//require",
    ],
)

# Legacy gazelle-style alias so older references keep working.
alias(
    name = "go_default_library",
    actual = ":cert",
    visibility = ["//visibility:public"],
)

48
vendor/github.com/lestrrat-go/jwx/v3/cert/cert.go generated vendored Normal file
View File

@@ -0,0 +1,48 @@
package cert
import (
"crypto/x509"
stdlibb64 "encoding/base64"
"fmt"
"io"
"github.com/lestrrat-go/jwx/v3/internal/base64"
)
// Create is a wrapper around x509.CreateCertificate, but it additionally
// encodes it in base64 so that it can be easily added to `x5c` fields
func Create(rand io.Reader, template, parent *x509.Certificate, pub, priv any) ([]byte, error) {
	// Build the DER-encoded certificate first, then hand it off to
	// EncodeBase64 so the result is ready for use in an `x5c` entry.
	raw, err := x509.CreateCertificate(rand, template, parent, pub, priv)
	if err == nil {
		return EncodeBase64(raw)
	}
	return nil, fmt.Errorf(`failed to create x509 certificate: %w`, err)
}
// EncodeBase64 is a utility function to encode ASN.1 DER certificates
// using base64 encoding. This operation is normally done by `pem.Encode`
// but since PEM would include the markers (`-----BEGIN`, and the like)
// while `x5c` fields do not need this, this function can be used to
// shave off a few lines
func EncodeBase64(der []byte) ([]byte, error) {
	// Standard (padded) base64 is what the JOSE `x5c` field expects.
	return []byte(stdlibb64.StdEncoding.EncodeToString(der)), nil
}
// Parse is a utility function to decode a base64 encoded
// ASN.1 DER format certificate, and to parse the byte sequence.
// The certificate must be in PKIX format, and it must not contain PEM markers
func Parse(src []byte) (*x509.Certificate, error) {
	// Undo the base64 layer first, then parse the raw DER payload.
	der, err := base64.Decode(src)
	if err != nil {
		return nil, fmt.Errorf(`failed to base64 decode the certificate: %w`, err)
	}
	parsed, err := x509.ParseCertificate(der)
	if err != nil {
		return nil, fmt.Errorf(`failed to parse x509 certificate: %w`, err)
	}
	return parsed, nil
}

80
vendor/github.com/lestrrat-go/jwx/v3/cert/chain.go generated vendored Normal file
View File

@@ -0,0 +1,80 @@
package cert
import (
"bytes"
"encoding/json"
"fmt"
"github.com/lestrrat-go/jwx/v3/internal/tokens"
)
// Chain represents a certificate chain as used in the `x5c` field of
// various objects within JOSE.
//
// It stores the certificates as a list of base64 encoded []byte
// sequence. By definition these values must PKIX encoded.
type Chain struct {
	// certificates holds each certificate as base64-encoded ASN.1 DER,
	// in the order they appear in the `x5c` array.
	certificates [][]byte
}

// MarshalJSON renders the chain as a JSON array of strings. The stored
// values are written between quotes verbatim; base64 output contains no
// characters that require JSON escaping, so no escaping pass is done.
func (cc Chain) MarshalJSON() ([]byte, error) {
	var buf bytes.Buffer
	buf.WriteByte(tokens.OpenSquareBracket)
	for i, cert := range cc.certificates {
		if i > 0 {
			buf.WriteByte(tokens.Comma)
		}
		buf.WriteByte('"')
		buf.Write(cert)
		buf.WriteByte('"')
	}
	buf.WriteByte(tokens.CloseSquareBracket)
	return buf.Bytes(), nil
}

// UnmarshalJSON replaces the contents of the chain with the entries of a
// JSON array of (base64 encoded) strings.
func (cc *Chain) UnmarshalJSON(data []byte) error {
	var tmp []string
	if err := json.Unmarshal(data, &tmp); err != nil {
		return fmt.Errorf(`failed to unmarshal certificate chain: %w`, err)
	}
	certs := make([][]byte, len(tmp))
	for i, cert := range tmp {
		certs[i] = []byte(cert)
	}
	cc.certificates = certs
	return nil
}

// Get returns the n-th ASN.1 DER + base64 encoded certificate
// stored. `false` will be returned in the second argument if
// the corresponding index is out of range.
func (cc *Chain) Get(index int) ([]byte, bool) {
	if index < 0 || index >= len(cc.certificates) {
		return nil, false
	}
	return cc.certificates[index], true
}

// Len returns the number of certificates stored in this Chain
func (cc *Chain) Len() int {
	return len(cc.certificates)
}

// NOTE(review): these markers contain spaces around BEGIN/END, unlike the
// standard PEM markers `-----BEGIN CERTIFICATE-----`. As written, Add will
// NOT strip conventional PEM marker lines — confirm whether this is
// intentional upstream.
var pemStart = []byte("----- BEGIN CERTIFICATE -----")
var pemEnd = []byte("----- END CERTIFICATE -----")

// AddString appends a base64 encoded DER certificate (given as a string)
// to the chain. See Add for the trimming that is applied.
func (cc *Chain) AddString(der string) error {
	return cc.Add([]byte(der))
}

// Add appends a base64 encoded DER certificate to the chain, trimming the
// marker lines declared above (and surrounding whitespace) if present.
// It currently never returns a non-nil error.
func (cc *Chain) Add(der []byte) error {
	// We're going to be nice and remove marker lines if they
	// give it to us
	der = bytes.TrimPrefix(der, pemStart)
	der = bytes.TrimSuffix(der, pemEnd)
	der = bytes.TrimSpace(der)
	cc.certificates = append(cc.certificates, der)
	return nil
}

2
vendor/github.com/lestrrat-go/jwx/v3/codecov.yml generated vendored Normal file
View File

@@ -0,0 +1,2 @@
codecov:
allow_coverage_offsets: true

104
vendor/github.com/lestrrat-go/jwx/v3/format.go generated vendored Normal file
View File

@@ -0,0 +1,104 @@
package jwx
import (
"bytes"
"encoding/json"
"github.com/lestrrat-go/jwx/v3/internal/tokens"
)
// FormatKind describes the result of guessing the format of a payload
// (see GuessFormat).
type FormatKind int

// These constants describe the result from guessing the format
// of the incoming buffer.
const (
	// InvalidFormat is returned when the format of the incoming buffer
	// has been deemed conclusively invalid
	InvalidFormat FormatKind = iota
	// UnknownFormat is returned when GuessFormat was not able to conclusively
	// determine the format of the buffer
	UnknownFormat
	JWE
	JWS
	JWK
	JWKS
	JWT
)

// formatHint captures the top-level JSON keys that distinguish the JOSE
// document types from one another; GuessFormat unmarshals into it to
// sniff which kind of payload it was given.
type formatHint struct {
	Payload    json.RawMessage `json:"payload"`    // Only in JWS
	Signatures json.RawMessage `json:"signatures"` // Only in JWS
	Ciphertext json.RawMessage `json:"ciphertext"` // Only in JWE
	KeyType    json.RawMessage `json:"kty"`        // Only in JWK
	Keys       json.RawMessage `json:"keys"`       // Only in JWKS
	Audience   json.RawMessage `json:"aud"`        // Only in JWT
}
// GuessFormat is used to guess the format the given payload is in
// using heuristics. See the type FormatKind for a full list of
// possible types.
//
// This may be useful in determining your next action when you may
// encounter a payload that could either be a JWE, JWS, or a plain JWT.
//
// Because JWTs are almost always JWS signed, you may be thrown off
// if you pass what you think is a JWT payload to this function.
// If the function is in the "Compact" format, it means it's a JWS
// signed message, and its payload is the JWT. Therefore this function
// will return JWS, not JWT.
//
// This function requires an extra parsing of the payload, and therefore
// may be inefficient if you call it every time before parsing.
func GuessFormat(payload []byte) FormatKind {
	// The check against kty, keys, and aud are something this library
	// made up. For the distinctions between JWE and JWS JSON, see
	// https://datatracker.ietf.org/doc/html/rfc7516#section-9 (only one
	// of the several methods described there is used).
	payload = bytes.TrimSpace(payload)
	if len(payload) == 0 {
		return UnknownFormat
	}

	if payload[0] != tokens.OpenCurlyBracket {
		// Not JSON, so assume the compact serialization. Periods separate
		// the segments: a JWS has 3 segments (2 periods), a JWE has 5
		// segments (4 periods); anything else is invalid.
		switch bytes.Count(payload, []byte{tokens.Period}) {
		case 2:
			return JWS
		case 4:
			return JWE
		}
		return InvalidFormat
	}

	// If we got here, we probably have JSON.
	var hint formatHint
	if err := json.Unmarshal(payload, &hint); err != nil {
		return UnknownFormat
	}

	// Probe the marker fields in the same priority order as before;
	// the first one present wins.
	switch {
	case hint.Audience != nil:
		return JWT
	case hint.KeyType != nil:
		return JWK
	case hint.Keys != nil:
		return JWKS
	case hint.Ciphertext != nil:
		return JWE
	case hint.Signatures != nil && hint.Payload != nil:
		return JWS
	}
	return UnknownFormat
}

View File

@@ -0,0 +1,29 @@
// Code generated by "stringer -type=FormatKind"; DO NOT EDIT.

package jwx

import "strconv"

func _() {
	// An "invalid array index" compiler error signifies that the constant values have changed.
	// Re-run the stringer command to generate them again.
	var x [1]struct{}
	_ = x[InvalidFormat-0]
	_ = x[UnknownFormat-1]
	_ = x[JWE-2]
	_ = x[JWS-3]
	_ = x[JWK-4]
	_ = x[JWKS-5]
	_ = x[JWT-6]
}

// _FormatKind_name concatenates all FormatKind names; _FormatKind_index
// holds the starting offset of each name within it.
const _FormatKind_name = "InvalidFormatUnknownFormatJWEJWSJWKJWKSJWT"

var _FormatKind_index = [...]uint8{0, 13, 26, 29, 32, 35, 39, 42}

// String returns the name of the FormatKind constant, or a
// "FormatKind(N)" fallback for out-of-range values.
func (i FormatKind) String() string {
	if i < 0 || i >= FormatKind(len(_FormatKind_index)-1) {
		return "FormatKind(" + strconv.FormatInt(int64(i), 10) + ")"
	}
	return _FormatKind_name[_FormatKind_index[i]:_FormatKind_index[i+1]]
}

View File

@@ -0,0 +1,21 @@
# Bazel build rules for the internal base64 helper package.
load("@rules_go//go:def.bzl", "go_library", "go_test")

go_library(
    name = "base64",
    srcs = ["base64.go"],
    importpath = "github.com/lestrrat-go/jwx/v3/internal/base64",
    visibility = ["//:__subpackages__"],
)

go_test(
    name = "base64_test",
    srcs = ["base64_test.go"],
    embed = [":base64"],
    deps = ["@com_github_stretchr_testify//require"],
)

# Legacy gazelle-style alias so older references keep working.
alias(
    name = "go_default_library",
    actual = ":base64",
    visibility = ["//:__subpackages__"],
)

View File

@@ -0,0 +1,51 @@
//go:build jwx_asmbase64

package base64

import (
	"fmt"
	"slices"

	asmbase64 "github.com/segmentio/asm/base64"
)

// init installs the SIMD-accelerated segmentio/asm base64 implementation
// as the package-wide encoder/decoder when built with -tags jwx_asmbase64.
func init() {
	SetEncoder(asmEncoder{asmbase64.RawURLEncoding})
	SetDecoder(asmDecoder{})
}

// asmEncoder wraps an asm Encoding and supplies the AppendEncode method
// required by this package's Encoder interface.
type asmEncoder struct {
	*asmbase64.Encoding
}

// AppendEncode grows dst by the encoded length of src, encodes src into
// the newly reserved tail, and returns the extended slice.
func (e asmEncoder) AppendEncode(dst, src []byte) []byte {
	n := e.Encoding.EncodedLen(len(src))
	dst = slices.Grow(dst, n)
	e.Encoding.Encode(dst[len(dst):][:n], src)
	return dst[:len(dst)+n]
}

// asmDecoder guesses the base64 flavor of the input (see Guess) and
// decodes it with the matching asm encoding.
type asmDecoder struct{}

func (d asmDecoder) Decode(src []byte) ([]byte, error) {
	var enc *asmbase64.Encoding
	switch Guess(src) {
	case Std:
		enc = asmbase64.StdEncoding
	case RawStd:
		enc = asmbase64.RawStdEncoding
	case URL:
		enc = asmbase64.URLEncoding
	case RawURL:
		enc = asmbase64.RawURLEncoding
	default:
		// Unreachable today: Guess always returns one of the four kinds.
		return nil, fmt.Errorf(`invalid encoding`)
	}
	dst := make([]byte, enc.DecodedLen(len(src)))
	n, err := enc.Decode(dst, src)
	if err != nil {
		return nil, fmt.Errorf(`failed to decode source: %w`, err)
	}
	return dst[:n], nil
}

View File

@@ -0,0 +1,139 @@
package base64
import (
"bytes"
"encoding/base64"
"encoding/binary"
"fmt"
"sync"
)
// Decoder is the interface for things that can base64-decode a byte slice.
type Decoder interface {
	Decode([]byte) ([]byte, error)
}

// Encoder is the interface for base64 encoders used by this package.
type Encoder interface {
	Encode([]byte, []byte)
	EncodedLen(int) int
	EncodeToString([]byte) string
	AppendEncode([]byte, []byte) []byte
}

// Package-wide encoder/decoder, each guarded by its own RWMutex so that
// alternate implementations can be swapped in at runtime.
var muEncoder sync.RWMutex
var encoder Encoder = base64.RawURLEncoding
var muDecoder sync.RWMutex
var decoder Decoder = defaultDecoder{}

// SetEncoder replaces the package-wide encoder.
func SetEncoder(enc Encoder) {
	muEncoder.Lock()
	encoder = enc
	muEncoder.Unlock()
}

// getEncoder returns the current encoder under the read lock.
func getEncoder() Encoder {
	muEncoder.RLock()
	enc := encoder
	muEncoder.RUnlock()
	return enc
}

// DefaultEncoder returns the encoder currently in use.
func DefaultEncoder() Encoder {
	return getEncoder()
}

// SetDecoder replaces the package-wide decoder.
func SetDecoder(dec Decoder) {
	muDecoder.Lock()
	decoder = dec
	muDecoder.Unlock()
}

// getDecoder returns the current decoder under the read lock.
func getDecoder() Decoder {
	muDecoder.RLock()
	dec := decoder
	muDecoder.RUnlock()
	return dec
}

// Encode base64-encodes src with the current encoder and returns the result.
func Encode(src []byte) []byte {
	enc := getEncoder()
	dst := make([]byte, enc.EncodedLen(len(src)))
	enc.Encode(dst, src)
	return dst
}

// EncodeToString base64-encodes src and returns it as a string.
func EncodeToString(src []byte) string {
	return getEncoder().EncodeToString(src)
}

// EncodeUint64ToString encodes v in big-endian byte order with all leading
// zero bytes stripped (v == 0 therefore encodes to the empty string).
func EncodeUint64ToString(v uint64) string {
	var data [8]byte
	binary.BigEndian.PutUint64(data[:], v)
	trimmed := data[:]
	for len(trimmed) > 0 && trimmed[0] == 0x0 {
		trimmed = trimmed[1:]
	}
	return EncodeToString(trimmed)
}

// The four flavors of base64 this package recognizes, plus a sentinel.
const (
	InvalidEncoding = iota
	Std
	URL
	RawStd
	RawURL
)

// Guess sniffs which base64 flavor src is most likely encoded with:
// presence of '+' or '/' implies the standard alphabet, and a trailing
// '=' implies padding.
func Guess(src []byte) int {
	padded := bytes.HasSuffix(src, []byte{'='})
	stdAlphabet := bytes.ContainsAny(src, "+/")
	switch {
	case stdAlphabet && padded:
		return Std
	case stdAlphabet:
		return RawStd
	case padded:
		return URL
	default:
		return RawURL
	}
}

// defaultDecoder is a Decoder that detects the encoding of the source and
// decodes it accordingly. This shouldn't really be required per the spec,
// but it exists because we have seen in the wild JWTs that are encoded
// using various versions of the base64 encoding.
type defaultDecoder struct{}

func (defaultDecoder) Decode(src []byte) ([]byte, error) {
	var enc *base64.Encoding
	switch Guess(src) {
	case RawURL:
		enc = base64.RawURLEncoding
	case URL:
		enc = base64.URLEncoding
	case RawStd:
		enc = base64.RawStdEncoding
	case Std:
		enc = base64.StdEncoding
	default:
		// Unreachable today: Guess always returns one of the four kinds.
		return nil, fmt.Errorf(`invalid encoding`)
	}
	dst := make([]byte, enc.DecodedLen(len(src)))
	n, err := enc.Decode(dst, src)
	if err != nil {
		return nil, fmt.Errorf(`failed to decode source: %w`, err)
	}
	return dst[:n], nil
}

// Decode base64-decodes src using the current decoder.
func Decode(src []byte) ([]byte, error) {
	return getDecoder().Decode(src)
}

// DecodeString base64-decodes the string src using the current decoder.
func DecodeString(src string) ([]byte, error) {
	return getDecoder().Decode([]byte(src))
}

View File

@@ -0,0 +1,14 @@
# Bazel build rules for the internal ecutil (elliptic-curve helper) package.
load("@rules_go//go:def.bzl", "go_library")

go_library(
    name = "ecutil",
    srcs = ["ecutil.go"],
    importpath = "github.com/lestrrat-go/jwx/v3/internal/ecutil",
    visibility = ["//:__subpackages__"],
)

# Legacy gazelle-style alias so older references keep working.
alias(
    name = "go_default_library",
    actual = ":ecutil",
    visibility = ["//:__subpackages__"],
)

View File

@@ -0,0 +1,76 @@
// Package ecutil defines tools that help with elliptic curve related
// computation
package ecutil
import (
"crypto/elliptic"
"math/big"
"sync"
)
const (
// size of buffer that needs to be allocated for EC521 curve
ec521BufferSize = 66 // (521 / 8) + 1
)
var ecpointBufferPool = sync.Pool{
New: func() any {
// In most cases the curve bit size will be less than this length
// so allocate the maximum, and keep reusing
buf := make([]byte, 0, ec521BufferSize)
return &buf
},
}
func getCrvFixedBuffer(size int) []byte {
//nolint:forcetypeassert
buf := *(ecpointBufferPool.Get().(*[]byte))
if size > ec521BufferSize && cap(buf) < size {
buf = append(buf, make([]byte, size-cap(buf))...)
}
return buf[:size]
}
// ReleaseECPointBuffer releases the []byte buffer allocated.
func ReleaseECPointBuffer(buf []byte) {
buf = buf[:cap(buf)]
buf[0] = 0x0
for i := 1; i < len(buf); i *= 2 {
copy(buf[i:], buf[:i])
}
buf = buf[:0]
ecpointBufferPool.Put(&buf)
}
func CalculateKeySize(crv elliptic.Curve) int {
// We need to create a buffer that fits the entire curve.
// If the curve size is 66, that fits in 9 bytes. If the curve
// size is 64, it fits in 8 bytes.
bits := crv.Params().BitSize
// For most common cases we know before hand what the byte length
// is going to be. optimize
var inBytes int
switch bits {
case 224, 256, 384: // TODO: use constant?
inBytes = bits / 8
case 521:
inBytes = ec521BufferSize
default:
inBytes = bits / 8
if (bits % 8) != 0 {
inBytes++
}
}
return inBytes
}
// AllocECPointBuffer allocates a buffer for the given point in the given
// curve. This buffer should be released using the ReleaseECPointBuffer
// function.
func AllocECPointBuffer(v *big.Int, crv elliptic.Curve) []byte {
buf := getCrvFixedBuffer(CalculateKeySize(crv))
v.FillBytes(buf)
return buf
}

View File

@@ -0,0 +1,19 @@
# Bazel build rules for the internal json package (engine-agnostic helpers).
load("@rules_go//go:def.bzl", "go_library")

go_library(
    name = "json",
    srcs = [
        "json.go",
        "registry.go",
        "stdlib.go",
    ],
    importpath = "github.com/lestrrat-go/jwx/v3/internal/json",
    visibility = ["//:__subpackages__"],
    deps = ["//internal/base64"],
)

# Legacy gazelle-style alias so older references keep working.
alias(
    name = "go_default_library",
    actual = ":json",
    visibility = ["//:__subpackages__"],
)

View File

@@ -0,0 +1,49 @@
//go:build jwx_goccy
// +build jwx_goccy
package json
import (
"io"
"github.com/goccy/go-json"
)
type Decoder = json.Decoder
type Delim = json.Delim
type Encoder = json.Encoder
type Marshaler = json.Marshaler
type Number = json.Number
type RawMessage = json.RawMessage
type Unmarshaler = json.Unmarshaler
func Engine() string {
return "github.com/goccy/go-json"
}
// NewDecoder respects the values specified in DecoderSettings,
// and creates a Decoder that has certain features turned on/off
func NewDecoder(r io.Reader) *json.Decoder {
dec := json.NewDecoder(r)
if UseNumber() {
dec.UseNumber()
}
return dec
}
// NewEncoder is just a proxy for "encoding/json".NewEncoder
func NewEncoder(w io.Writer) *json.Encoder {
return json.NewEncoder(w)
}
// Marshal is just a proxy for "encoding/json".Marshal
func Marshal(v any) ([]byte, error) {
return json.Marshal(v)
}
// MarshalIndent is just a proxy for "encoding/json".MarshalIndent
func MarshalIndent(v any, prefix, indent string) ([]byte, error) {
return json.MarshalIndent(v, prefix, indent)
}

View File

@@ -0,0 +1,127 @@
package json
import (
"bytes"
"fmt"
"os"
"sync/atomic"
"github.com/lestrrat-go/jwx/v3/internal/base64"
)
// useNumber controls whether decoders produced by this package call
// (*json.Decoder).UseNumber. Implemented with atomic.Bool, resolving the
// old "change to atomic.Bool" TODO (the module requires Go >= 1.19).
var useNumber atomic.Bool

// UseNumber reports whether JSON decoders should decode numbers into
// json.Number instead of float64.
func UseNumber() bool {
	return useNumber.Load()
}

// DecoderSettings sets the global configuration for json decoding.
func DecoderSettings(inUseNumber bool) {
	useNumber.Store(inUseNumber)
}
// Unmarshal respects the values specified in DecoderSettings,
// and uses a Decoder that has certain features turned on/off
func Unmarshal(b []byte, v any) error {
	dec := NewDecoder(bytes.NewReader(b))
	return dec.Decode(v)
}

// AssignNextBytesToken decodes the next JSON token as a string, base64
// decodes it, and stores the resulting raw bytes into *dst.
// NOTE(review): the underlying base64 error is discarded here and only the
// value's type is reported; consider wrapping it with %w.
func AssignNextBytesToken(dst *[]byte, dec *Decoder) error {
	var val string
	if err := dec.Decode(&val); err != nil {
		return fmt.Errorf(`error reading next value: %w`, err)
	}
	buf, err := base64.DecodeString(val)
	if err != nil {
		return fmt.Errorf(`expected base64 encoded []byte (%T)`, val)
	}
	*dst = buf
	return nil
}

// ReadNextStringToken decodes the next JSON token as a string and returns it.
func ReadNextStringToken(dec *Decoder) (string, error) {
	var val string
	if err := dec.Decode(&val); err != nil {
		return "", fmt.Errorf(`error reading next value: %w`, err)
	}
	return val, nil
}

// AssignNextStringToken decodes the next JSON token as a string and stores
// a pointer to it into *dst.
func AssignNextStringToken(dst **string, dec *Decoder) error {
	val, err := ReadNextStringToken(dec)
	if err != nil {
		return err
	}
	*dst = &val
	return nil
}
// FlattenAudience is a flag to specify if we should flatten the "aud"
// entry to a string when there's only one entry.
// In jwx < 1.1.8 we just dumped everything as an array of strings,
// but apparently AWS Cognito doesn't handle this well.
//
// So now we have the ability to dump "aud" as a string if there's
// only one entry, but we need to retain the old behavior so that
// we don't accidentally break somebody else's code. (e.g. messing
// up how signatures are calculated)
//
// NOTE(review): declared as uint32, presumably accessed atomically by
// callers elsewhere — confirm at the call sites.
var FlattenAudience uint32

// MarshalAudience marshals the "aud" values. When flatten is true and
// there is exactly one entry, it is emitted as a bare JSON string rather
// than a one-element array.
func MarshalAudience(aud []string, flatten bool) ([]byte, error) {
	var val any
	if len(aud) == 1 && flatten {
		val = aud[0]
	} else {
		val = aud
	}
	return Marshal(val)
}

// EncodeAudience is the streaming counterpart of MarshalAudience, writing
// the (possibly flattened) "aud" value to enc.
func EncodeAudience(enc *Encoder, aud []string, flatten bool) error {
	var val any
	if len(aud) == 1 && flatten {
		val = aud[0]
	} else {
		val = aud
	}
	return enc.Encode(val)
}
// DecodeCtx is an interface for objects that needs that extra something
// when decoding JSON into an object.
type DecodeCtx interface {
	Registry() *Registry
}

// DecodeCtxContainer is used to differentiate objects that can carry extra
// decoding hints and those who can't.
type DecodeCtxContainer interface {
	DecodeCtx() DecodeCtx
	SetDecodeCtx(DecodeCtx)
}

// stock decodeCtx. should cover 80% of the cases
type decodeCtx struct {
	registry *Registry
}

// NewDecodeCtx wraps r in the stock DecodeCtx implementation.
func NewDecodeCtx(r *Registry) DecodeCtx {
	return &decodeCtx{registry: r}
}

// Registry returns the registry this context was created with.
func (dc *decodeCtx) Registry() *Registry {
	return dc.registry
}

// Dump pretty-prints v as indented JSON to stdout. Debugging helper only;
// the encode error is deliberately discarded.
func Dump(v any) {
	enc := NewEncoder(os.Stdout)
	enc.SetIndent("", "  ")
	//nolint:errchkjson
	_ = enc.Encode(v)
}

View File

@@ -0,0 +1,90 @@
package json
import (
"fmt"
"reflect"
"sync"
)
// CustomDecoder is the interface we expect from RegisterCustomField in jws, jwe, jwk, and jwt packages.
type CustomDecoder interface {
	// Decode takes a JSON encoded byte slice and returns the decoded
	// value, which is used as the value for the field registered
	// through RegisterCustomField.
	Decode([]byte) (any, error)
}

// CustomDecodeFunc adapts a plain, stateless function so that it
// satisfies the CustomDecoder interface.
type CustomDecodeFunc func([]byte) (any, error)

// Decode invokes the wrapped function.
func (fn CustomDecodeFunc) Decode(raw []byte) (any, error) {
	return fn(raw)
}
// objectTypeDecoder decodes a field into a fresh instance of typ via
// reflection. It is the CustomDecoder used for values registered by
// example object rather than by decoder.
type objectTypeDecoder struct {
	typ  reflect.Type // concrete type to instantiate per Decode call
	name string       // field name, used only in error messages
}

// Decode unmarshals data into a new *typ and returns the dereferenced value.
func (dec *objectTypeDecoder) Decode(data []byte) (any, error) {
	ptr := reflect.New(dec.typ).Interface()
	if err := Unmarshal(data, ptr); err != nil {
		return nil, fmt.Errorf(`failed to decode field %s: %w`, dec.name, err)
	}
	return reflect.ValueOf(ptr).Elem().Interface(), nil
}
// Registry maps field names to their custom decoders. It is safe for
// concurrent use; all accesses go through mu.
type Registry struct {
	mu   *sync.RWMutex
	ctrs map[string]CustomDecoder
}

// NewRegistry creates an empty, ready-to-use Registry.
func NewRegistry() *Registry {
	return &Registry{
		mu:   &sync.RWMutex{},
		ctrs: make(map[string]CustomDecoder),
	}
}
// Register associates name with a decoder. Passing a nil object removes
// any existing registration. If object implements CustomDecoder it is
// used as-is; otherwise incoming values are decoded into a new instance
// of object's dynamic type.
func (r *Registry) Register(name string, object any) {
	// Acquire the lock once for both the removal and the registration
	// paths (previously the Lock/Unlock pair was duplicated per branch).
	r.mu.Lock()
	defer r.mu.Unlock()
	if object == nil {
		delete(r.ctrs, name)
		return
	}
	if ctr, ok := object.(CustomDecoder); ok {
		r.ctrs[name] = ctr
		return
	}
	r.ctrs[name] = &objectTypeDecoder{
		typ:  reflect.TypeOf(object),
		name: name,
	}
}
// Decode reads the next value for name from dec. When a custom decoder is
// registered for name, the raw JSON is captured first and handed to it;
// otherwise the value is decoded generically into an `any`.
func (r *Registry) Decode(dec *Decoder, name string) (any, error) {
	r.mu.RLock()
	defer r.mu.RUnlock()
	if ctr, ok := r.ctrs[name]; ok {
		// Capture the raw message so the custom decoder gets the exact bytes.
		var raw RawMessage
		if err := dec.Decode(&raw); err != nil {
			return nil, fmt.Errorf(`failed to decode field %s: %w`, name, err)
		}
		v, err := ctr.Decode([]byte(raw))
		if err != nil {
			return nil, fmt.Errorf(`failed to decode field %s: %w`, name, err)
		}
		return v, nil
	}
	var decoded any
	if err := dec.Decode(&decoded); err != nil {
		return nil, fmt.Errorf(`failed to decode field %s: %w`, name, err)
	}
	return decoded, nil
}

View File

@@ -0,0 +1,47 @@
//go:build !jwx_goccy
// +build !jwx_goccy

package json

import (
	"encoding/json"
	"io"
)

// Aliases to the underlying encoding/json types so the rest of the code
// base is agnostic to which JSON engine is compiled in (see goccy.go for
// the jwx_goccy alternative).
type Decoder = json.Decoder
type Delim = json.Delim
type Encoder = json.Encoder
type Marshaler = json.Marshaler
type Number = json.Number
type RawMessage = json.RawMessage
type Unmarshaler = json.Unmarshaler

// Engine returns the name of the JSON engine compiled into this build.
func Engine() string {
	return "encoding/json"
}

// NewDecoder respects the values specified in DecoderSettings,
// and creates a Decoder that has certain features turned on/off
func NewDecoder(r io.Reader) *json.Decoder {
	dec := json.NewDecoder(r)
	if UseNumber() {
		dec.UseNumber()
	}
	return dec
}

// NewEncoder is just a proxy for "encoding/json".NewEncoder
func NewEncoder(w io.Writer) *json.Encoder {
	return json.NewEncoder(w)
}

// Marshal is just a proxy for "encoding/json".Marshal
func Marshal(v any) ([]byte, error) {
	return json.Marshal(v)
}

// MarshalIndent is just a proxy for "encoding/json".MarshalIndent
func MarshalIndent(v any, prefix, indent string) ([]byte, error) {
	return json.MarshalIndent(v, prefix, indent)
}

View File

@@ -0,0 +1,8 @@
# Bazel build rules for the internal jwxio package.
load("@rules_go//go:def.bzl", "go_library")

go_library(
    name = "jwxio",
    srcs = ["jwxio.go"],
    importpath = "github.com/lestrrat-go/jwx/v3/internal/jwxio",
    visibility = ["//:__subpackages__"],
)

View File

@@ -0,0 +1,29 @@
package jwxio
import (
"bytes"
"errors"
"io"
"strings"
)
var errNonFiniteSource = errors.New(`cannot read from non-finite source`)
func NonFiniteSourceError() error {
return errNonFiniteSource
}
// ReadAllFromFiniteSource reads all data from a io.Reader _if_ it comes from a
// finite source.
func ReadAllFromFiniteSource(rdr io.Reader) ([]byte, error) {
switch rdr.(type) {
case *bytes.Reader, *bytes.Buffer, *strings.Reader:
data, err := io.ReadAll(rdr)
if err != nil {
return nil, err
}
return data, nil
default:
return nil, errNonFiniteSource
}
}

View File

@@ -0,0 +1,31 @@
# Bazel build rules for the internal keyconv package.
load("@rules_go//go:def.bzl", "go_library", "go_test")

go_library(
    name = "keyconv",
    srcs = ["keyconv.go"],
    importpath = "github.com/lestrrat-go/jwx/v3/internal/keyconv",
    visibility = ["//:__subpackages__"],
    deps = [
        "//jwk",
        "@com_github_lestrrat_go_blackmagic//:blackmagic",
        "@org_golang_x_crypto//ed25519",
    ],
)

go_test(
    name = "keyconv_test",
    srcs = ["keyconv_test.go"],
    deps = [
        ":keyconv",
        "//internal/jwxtest",
        "//jwa",
        "//jwk",
        "@com_github_stretchr_testify//require",
    ],
)

# Legacy gazelle-style alias so older references keep working.
alias(
    name = "go_default_library",
    actual = ":keyconv",
    visibility = ["//:__subpackages__"],
)

Some files were not shown because too many files have changed in this diff Show More