diff --git a/go.mod b/go.mod index 50f8afaf103..efb64439774 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,7 @@ toolchain go1.24.9 require ( dario.cat/mergo v1.0.2 github.com/CiscoM31/godata v1.0.10 - github.com/KimMachineGun/automemlimit v0.7.4 + github.com/KimMachineGun/automemlimit v0.7.5 github.com/Masterminds/semver v1.5.0 github.com/MicahParks/keyfunc/v2 v2.1.0 github.com/Nerzal/gocloak/v13 v13.9.0 @@ -15,13 +15,13 @@ require ( github.com/beevik/etree v1.6.0 github.com/blevesearch/bleve/v2 v2.5.7 github.com/cenkalti/backoff v2.2.1+incompatible - github.com/coreos/go-oidc/v3 v3.16.0 + github.com/coreos/go-oidc/v3 v3.17.0 github.com/cs3org/go-cs3apis v0.0.0-20241105092511-3ad35d174fc1 github.com/davidbyttow/govips/v2 v2.16.0 github.com/dhowden/tag v0.0.0-20240417053706-3d75831295e8 github.com/dutchcoders/go-clamd v0.0.0-20170520113014-b970184f4d9e github.com/egirna/icap-client v0.1.1 - github.com/gabriel-vasile/mimetype v1.4.10 + github.com/gabriel-vasile/mimetype v1.4.12 github.com/ggwhite/go-masker v1.1.0 github.com/go-chi/chi/v5 v5.2.4 github.com/go-chi/render v1.0.3 @@ -35,7 +35,7 @@ require ( github.com/go-micro/plugins/v4/store/nats-js-kv v0.0.0-20240726082623-6831adfdcdc4 github.com/go-micro/plugins/v4/wrapper/monitoring/prometheus v1.2.0 github.com/go-micro/plugins/v4/wrapper/trace/opentelemetry v1.2.0 - github.com/go-playground/validator/v10 v10.28.0 + github.com/go-playground/validator/v10 v10.30.1 github.com/gofrs/uuid v4.4.0+incompatible github.com/golang-jwt/jwt/v5 v5.3.0 github.com/golang/protobuf v1.5.4 @@ -57,16 +57,16 @@ require ( github.com/mitchellh/mapstructure v1.5.0 github.com/mna/pigeon v1.3.0 github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 - github.com/nats-io/nats-server/v2 v2.12.1 - github.com/nats-io/nats.go v1.46.1 + github.com/nats-io/nats-server/v2 v2.12.3 + github.com/nats-io/nats.go v1.48.0 github.com/olekukonko/tablewriter v1.1.0 github.com/onsi/ginkgo v1.16.5 - github.com/onsi/ginkgo/v2 v2.27.2 - 
github.com/onsi/gomega v1.38.2 + github.com/onsi/ginkgo/v2 v2.27.4 + github.com/onsi/gomega v1.38.3 github.com/open-policy-agent/opa v1.10.1 github.com/orcaman/concurrent-map v1.0.0 github.com/owncloud/libre-graph-api-go v1.0.5-0.20251107084958-31937a4ea3f1 - github.com/owncloud/reva/v2 v2.0.0-20251107154850-a122a9538794 + github.com/owncloud/reva/v2 v2.0.0-20260116122933-81e6e21256eb github.com/pkg/errors v0.9.1 github.com/pkg/xattr v0.4.12 github.com/prometheus/client_golang v1.23.2 @@ -74,7 +74,7 @@ require ( github.com/rogpeppe/go-internal v1.14.1 github.com/rs/cors v1.11.1 github.com/rs/zerolog v1.34.0 - github.com/shamaton/msgpack/v2 v2.3.1 + github.com/shamaton/msgpack/v2 v2.4.0 github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af github.com/spf13/afero v1.15.0 github.com/spf13/cobra v1.10.1 @@ -88,25 +88,25 @@ require ( github.com/xhit/go-simple-mail/v2 v2.16.0 go-micro.dev/v4 v4.11.0 go.etcd.io/bbolt v1.4.2 - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0 + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.64.0 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 go.opentelemetry.io/contrib/zpages v0.63.0 - go.opentelemetry.io/otel v1.38.0 + go.opentelemetry.io/otel v1.39.0 go.opentelemetry.io/otel/exporters/jaeger v1.17.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 - go.opentelemetry.io/otel/sdk v1.38.0 - go.opentelemetry.io/otel/trace v1.38.0 - golang.org/x/crypto v0.45.0 + go.opentelemetry.io/otel/sdk v1.39.0 + go.opentelemetry.io/otel/trace v1.39.0 + golang.org/x/crypto v0.46.0 golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b golang.org/x/image v0.32.0 golang.org/x/net v0.47.0 golang.org/x/oauth2 v0.34.0 - golang.org/x/sync v0.18.0 + golang.org/x/sync v0.19.0 golang.org/x/term v0.39.0 - golang.org/x/text v0.31.0 - google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 - google.golang.org/grpc v1.76.0 - 
google.golang.org/protobuf v1.36.10 + golang.org/x/text v0.32.0 + google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8 + google.golang.org/grpc v1.77.0 + google.golang.org/protobuf v1.36.11 gopkg.in/yaml.v2 v2.4.0 gotest.tools/v3 v3.5.2 stash.kopano.io/kgol/rndm v1.1.2 @@ -116,7 +116,7 @@ require ( contrib.go.opencensus.io/exporter/prometheus v0.4.2 // indirect filippo.io/edwards25519 v1.1.0 // indirect github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect - github.com/BurntSushi/toml v1.5.0 // indirect + github.com/BurntSushi/toml v1.6.0 // indirect github.com/Masterminds/goutils v1.1.1 // indirect github.com/Masterminds/semver/v3 v3.4.0 // indirect github.com/Masterminds/sprig v2.22.0+incompatible // indirect @@ -127,7 +127,7 @@ require ( github.com/ajg/form v1.5.1 // indirect github.com/alexedwards/argon2id v1.0.0 // indirect github.com/amoghe/go-crypt v0.0.0-20220222110647-20eada5f5964 // indirect - github.com/antithesishq/antithesis-sdk-go v0.4.3-default-no-op // indirect + github.com/antithesishq/antithesis-sdk-go v0.5.0-default-no-op // indirect github.com/armon/go-radix v1.0.0 // indirect github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect github.com/aws/aws-sdk-go v1.55.8 // indirect @@ -154,12 +154,12 @@ require ( github.com/bluele/gcache v0.0.2 // indirect github.com/bombsimon/logrusr/v3 v3.1.0 // indirect github.com/cenkalti/backoff/v5 v5.0.3 // indirect - github.com/ceph/go-ceph v0.36.0 // indirect + github.com/ceph/go-ceph v0.37.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/cevaris/ordered_map v0.0.0-20190319150403-3adeae072e73 // indirect github.com/cloudflare/circl v1.6.1 // indirect github.com/coreos/go-semver v0.3.1 // indirect - github.com/coreos/go-systemd/v22 v22.5.0 // indirect + github.com/coreos/go-systemd/v22 v22.6.0 // indirect github.com/cornelk/hashmap v1.0.8 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.7 // indirect 
github.com/crewjam/httperr v0.2.0 // indirect @@ -212,16 +212,16 @@ require ( github.com/gobwas/ws v1.2.1 // indirect github.com/goccy/go-json v0.10.5 // indirect github.com/goccy/go-yaml v1.18.0 // indirect - github.com/gofrs/flock v0.12.1 // indirect + github.com/gofrs/flock v0.13.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt/v4 v4.5.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/snappy v0.0.4 // indirect - github.com/gomodule/redigo v1.9.2 // indirect + github.com/gomodule/redigo v1.9.3 // indirect github.com/google/go-querystring v1.1.0 // indirect - github.com/google/go-tpm v0.9.6 // indirect + github.com/google/go-tpm v0.9.7 // indirect github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 // indirect - github.com/google/renameio/v2 v2.0.0 // indirect + github.com/google/renameio/v2 v2.0.1 // indirect github.com/gookit/goutil v0.7.1 // indirect github.com/gorilla/handlers v1.5.1 // indirect github.com/gorilla/schema v1.4.1 // indirect @@ -240,8 +240,9 @@ require ( github.com/juliangruber/go-intersect v1.1.0 // indirect github.com/kettek/apng v0.0.0-20250827064933-2bb5f5fcf253 // indirect github.com/kevinburke/ssh_config v1.2.0 // indirect - github.com/klauspost/compress v1.18.0 // indirect + github.com/klauspost/compress v1.18.2 // indirect github.com/klauspost/cpuid/v2 v2.2.11 // indirect + github.com/klauspost/crc32 v1.3.0 // indirect github.com/kovidgoyal/go-parallel v1.1.1 // indirect github.com/leodido/go-urn v1.4.0 // indirect github.com/lestrrat-go/blackmagic v1.0.4 // indirect @@ -259,15 +260,15 @@ require ( github.com/mattn/go-colorable v0.1.14 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/mattn/go-runewidth v0.0.16 // indirect - github.com/mattn/go-sqlite3 v1.14.32 // indirect + github.com/mattn/go-sqlite3 v1.14.33 // indirect github.com/maxymania/go-system v0.0.0-20170110133659-647cc364bf0b // indirect 
github.com/mendsley/gojwk v0.0.0-20141217222730-4d5ec6e58103 // indirect github.com/miekg/dns v1.1.67 // indirect github.com/mileusna/useragent v1.3.5 // indirect - github.com/minio/crc64nvme v1.0.2 // indirect - github.com/minio/highwayhash v1.0.3 // indirect + github.com/minio/crc64nvme v1.1.0 // indirect + github.com/minio/highwayhash v1.0.4-0.20251030100505-070ab1a87a76 // indirect github.com/minio/md5-simd v1.1.2 // indirect - github.com/minio/minio-go/v7 v7.0.95 // indirect + github.com/minio/minio-go/v7 v7.0.97 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect @@ -275,10 +276,10 @@ require ( github.com/mschoch/smat v0.2.0 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/nats-io/jwt/v2 v2.8.0 // indirect - github.com/nats-io/nkeys v0.4.11 // indirect + github.com/nats-io/nkeys v0.4.12 // indirect github.com/nats-io/nuid v1.0.1 // indirect github.com/nxadm/tail v1.4.8 // indirect - github.com/oklog/run v1.1.0 // indirect + github.com/oklog/run v1.2.0 // indirect github.com/olekukonko/errors v1.1.0 // indirect github.com/olekukonko/ll v0.0.9 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect @@ -291,9 +292,9 @@ require ( github.com/pjbgf/sha1cd v0.3.0 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/pquerna/cachecontrol v0.2.0 // indirect - github.com/prometheus/alertmanager v0.28.1 // indirect + github.com/prometheus/alertmanager v0.30.0 // indirect github.com/prometheus/client_model v0.6.2 // indirect - github.com/prometheus/common v0.66.1 // indirect + github.com/prometheus/common v0.67.4 // indirect github.com/prometheus/procfs v0.17.0 // indirect github.com/prometheus/statsd_exporter v0.22.8 // indirect github.com/rcrowley/go-metrics v0.0.0-20250401214520-65e299d6c5c9 // indirect @@ -329,24 
+330,24 @@ require ( github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect github.com/yashtewari/glob-intersection v0.2.0 // indirect - go.etcd.io/etcd/api/v3 v3.6.5 // indirect - go.etcd.io/etcd/client/pkg/v3 v3.6.5 // indirect - go.etcd.io/etcd/client/v3 v3.6.5 // indirect + go.etcd.io/etcd/api/v3 v3.6.7 // indirect + go.etcd.io/etcd/client/pkg/v3 v3.6.7 // indirect + go.etcd.io/etcd/client/v3 v3.6.7 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/auto/sdk v1.1.0 // indirect + go.opentelemetry.io/auto/sdk v1.2.1 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 // indirect - go.opentelemetry.io/otel/metric v1.38.0 // indirect + go.opentelemetry.io/otel/metric v1.39.0 // indirect go.opentelemetry.io/proto/otlp v1.7.1 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - go.yaml.in/yaml/v2 v2.4.2 // indirect + go.yaml.in/yaml/v2 v2.4.3 // indirect go.yaml.in/yaml/v3 v3.0.4 // indirect - golang.org/x/mod v0.29.0 // indirect + golang.org/x/mod v0.30.0 // indirect golang.org/x/sys v0.40.0 // indirect golang.org/x/time v0.14.0 // indirect - golang.org/x/tools v0.38.0 // indirect + golang.org/x/tools v0.39.0 // indirect google.golang.org/genproto v0.0.0-20250303144028-a0af3efb3deb // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217 // indirect gopkg.in/cenkalti/backoff.v1 v1.1.0 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect diff --git a/go.sum b/go.sum index 4676b5fcf52..eec7116273e 100644 --- a/go.sum +++ b/go.sum @@ -44,15 +44,15 @@ github.com/Acconut/go-httptest-recorder v1.0.0/go.mod h1:CwQyhTH1kq/gLyWiRieo7c0 github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 
h1:mFRzDkZVAjdal+s7s0MwaRv9igoPqLRdzOLzw/8Xvq8= github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg= -github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= +github.com/BurntSushi/toml v1.6.0 h1:dRaEfpa2VI55EwlIW72hMRHdWouJeRF7TPYhI+AUQjk= +github.com/BurntSushi/toml v1.6.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/CiscoM31/godata v1.0.10 h1:DZdJ6M8QNh4HquvDDOqNLu6h77Wl86KGK7Qlbmb90sk= github.com/CiscoM31/godata v1.0.10/go.mod h1:ZMiT6JuD3Rm83HEtiTx4JEChsd25YCrxchKGag/sdTc= github.com/DeepDiver1975/secure v0.0.0-20240611112133-abc838fb797c h1:ocsNvQ2tNHme4v/lTs17HROamc7mFzZfzWcg4m+UXN0= github.com/DeepDiver1975/secure v0.0.0-20240611112133-abc838fb797c/go.mod h1:BmF5hyM6tXczk3MpQkFf1hpKSRqCyhqcbiQtiAF7+40= -github.com/KimMachineGun/automemlimit v0.7.4 h1:UY7QYOIfrr3wjjOAqahFmC3IaQCLWvur9nmfIn6LnWk= -github.com/KimMachineGun/automemlimit v0.7.4/go.mod h1:QZxpHaGOQoYvFhv/r4u3U0JTC2ZcOwbSr11UZF46UBM= +github.com/KimMachineGun/automemlimit v0.7.5 h1:RkbaC0MwhjL1ZuBKunGDjE/ggwAX43DwZrJqVwyveTk= +github.com/KimMachineGun/automemlimit v0.7.5/go.mod h1:QZxpHaGOQoYvFhv/r4u3U0JTC2ZcOwbSr11UZF46UBM= github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= @@ -92,8 +92,8 @@ github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883 h1:bvNMNQO63//z+xNg github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= 
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8= github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4= -github.com/antithesishq/antithesis-sdk-go v0.4.3-default-no-op h1:+OSa/t11TFhqfrX0EOSqQBDJ0YlpmK0rDSiB19dg9M0= -github.com/antithesishq/antithesis-sdk-go v0.4.3-default-no-op/go.mod h1:IUpT2DPAKh6i/YhSbt6Gl3v2yvUZjmKncl7U91fup7E= +github.com/antithesishq/antithesis-sdk-go v0.5.0-default-no-op h1:Ucf+QxEKMbPogRO5guBNe5cgd9uZgfoJLOYs8WWhtjM= +github.com/antithesishq/antithesis-sdk-go v0.5.0-default-no-op/go.mod h1:IUpT2DPAKh6i/YhSbt6Gl3v2yvUZjmKncl7U91fup7E= github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0 h1:jfIu9sQUG6Ig+0+Ap1h4unLjW6YQJpKZVmUzxsD4E/Q= github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0/go.mod h1:t2tdKJDJF9BV14lnkjHmOQgcvEKgtqs5a1N3LNdJhGE= github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= @@ -170,8 +170,8 @@ github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QH github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM= github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/ceph/go-ceph v0.36.0 h1:IDE4vEF+4fmjve+CPjD1WStgfQ+Lh6vD+9PMUI712KI= -github.com/ceph/go-ceph v0.36.0/go.mod h1:fGCbndVDLuHW7q2954d6y+tgPFOBnRLqJRe2YXyngw4= +github.com/ceph/go-ceph v0.37.0 h1:KXliBe3ZDr3/AtfY7n9d1MG7ippYNCVhMPcAgm05CFI= +github.com/ceph/go-ceph v0.37.0/go.mod h1:3y2tOlITlyuVFhy8v6PpCEfjMwKPfXJiH0/2hKZZQRE= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.3.0 
h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= @@ -185,12 +185,13 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk github.com/cloudflare/circl v1.6.1 h1:zqIqSPIndyBh1bjLVVDHMPpVKqp8Su/V+6MeDzzQBQ0= github.com/cloudflare/circl v1.6.1/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/coreos/go-oidc/v3 v3.16.0 h1:qRQUCFstKpXwmEjDQTIbyY/5jF00+asXzSkmkoa/mow= -github.com/coreos/go-oidc/v3 v3.16.0/go.mod h1:wqPbKFrVnE90vty060SB40FCJ8fTHTxSwyXJqZH+sI8= +github.com/coreos/go-oidc/v3 v3.17.0 h1:hWBGaQfbi0iVviX4ibC7bk8OKT5qNr4klBaCHVNvehc= +github.com/coreos/go-oidc/v3 v3.17.0/go.mod h1:wqPbKFrVnE90vty060SB40FCJ8fTHTxSwyXJqZH+sI8= github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= -github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/coreos/go-systemd/v22 v22.6.0 h1:aGVa/v8B7hpb0TKl0MWoAavPDmHvobFe5R5zn0bCJWo= +github.com/coreos/go-systemd/v22 v22.6.0/go.mod h1:iG+pp635Fo7ZmV/j14KUcmEyWF+0X7Lua8rrTWzYgWU= github.com/cornelk/hashmap v1.0.8 h1:nv0AWgw02n+iDcawr5It4CjQIAcdMMKRrs10HOJYlrc= github.com/cornelk/hashmap v1.0.8/go.mod h1:RfZb7JO3RviW/rT6emczVuC/oxpdz4UsSB2LJSclR1k= github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= @@ -265,8 +266,8 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= 
-github.com/gabriel-vasile/mimetype v1.4.10 h1:zyueNbySn/z8mJZHLt6IPw0KoZsiQNszIpU+bX4+ZK0= -github.com/gabriel-vasile/mimetype v1.4.10/go.mod h1:d+9Oxyo1wTzWdyVUPMmXFvp4F9tea18J8ufA774AB3s= +github.com/gabriel-vasile/mimetype v1.4.12 h1:e9hWvmLYvtp846tLHam2o++qitpguFiYCKbn0w9jyqw= +github.com/gabriel-vasile/mimetype v1.4.12/go.mod h1:d+9Oxyo1wTzWdyVUPMmXFvp4F9tea18J8ufA774AB3s= github.com/gdexlab/go-render v1.0.1 h1:rxqB3vo5s4n1kF0ySmoNeSPRYkEsyHgln4jFIQY7v0U= github.com/gdexlab/go-render v1.0.1/go.mod h1:wRi5nW2qfjiGj4mPukH4UV0IknS1cHD4VgFTmJX5JzM= github.com/ggwhite/go-masker v1.1.0 h1:kN/KIvktu2U+hd3KWrSlLj7xBGD1iBfc9/xdbVgFbRc= @@ -358,8 +359,8 @@ github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/o github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= -github.com/go-playground/validator/v10 v10.28.0 h1:Q7ibns33JjyW48gHkuFT91qX48KG0ktULL6FgHdG688= -github.com/go-playground/validator/v10 v10.28.0/go.mod h1:GoI6I1SjPBh9p7ykNE/yj3fFYbyDOpwMn5KXd+m2hUU= +github.com/go-playground/validator/v10 v10.30.1 h1:f3zDSN/zOma+w6+1Wswgd9fLkdwy06ntQJp0BBvFG0w= +github.com/go-playground/validator/v10 v10.30.1/go.mod h1:oSuBIQzuJxL//3MelwSLD5hc2Tu889bF0Idm9Dg26cM= github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI= github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo= github.com/go-resty/resty/v2 v2.16.5 h1:hBKqmWrr7uRc3euHVqmh1HTHcKn99Smr7o5spptdhTM= @@ -389,12 +390,12 @@ github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PU github.com/goccy/go-yaml v1.18.0 h1:8W7wMFS12Pcas7KU+VVkaiCng+kG8QiFeFwzFb+rwuw= github.com/goccy/go-yaml v1.18.0/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA= 
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gofrs/flock v0.12.1 h1:MTLVXXHf8ekldpJk3AKicLij9MdwOWkZ+a/jHHZby9E= -github.com/gofrs/flock v0.12.1/go.mod h1:9zxTsyu5xtJ9DK+1tFZyibEV7y3uwDxPPfbxeeHCoD0= +github.com/gofrs/flock v0.13.0 h1:95JolYOvGMqeH31+FC7D2+uULf6mG61mEZ/A8dRYMzw= +github.com/gofrs/flock v0.13.0/go.mod h1:jxeyy9R1auM5S6JYDBhDt+E2TCo7DkratH4Pgi8P+Z0= github.com/gofrs/uuid v4.4.0+incompatible h1:3qXRTX8/NbyulANqlc0lchS1gqAVxRgsuW1YrTJupqA= github.com/gofrs/uuid v4.4.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= -github.com/gofrs/uuid/v5 v5.3.2 h1:2jfO8j3XgSwlz/wHqemAEugfnTlikAYHhnqQ8Xh4fE0= -github.com/gofrs/uuid/v5 v5.3.2/go.mod h1:CDOjlDMVAtN56jqyRUZh58JT31Tiw7/oQyEXZV+9bD8= +github.com/gofrs/uuid/v5 v5.4.0 h1:EfbpCTjqMuGyq5ZJwxqzn3Cbr2d0rUZU7v5ycAk/e/0= +github.com/gofrs/uuid/v5 v5.4.0/go.mod h1:CDOjlDMVAtN56jqyRUZh58JT31Tiw7/oQyEXZV+9bD8= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= @@ -438,8 +439,8 @@ github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/gomodule/redigo v1.9.2 h1:HrutZBLhSIU8abiSfW8pj8mPhOyMYjZT/wcA4/L9L9s= -github.com/gomodule/redigo v1.9.2/go.mod h1:KsU3hiK/Ay8U42qpaJk+kuNa3C+spxapWpM+ywhcgtw= +github.com/gomodule/redigo v1.9.3 h1:dNPSXeXv6HCq2jdyWfjgmhBdqnR6PRO3m/G05nvpPC8= +github.com/gomodule/redigo v1.9.3/go.mod h1:KsU3hiK/Ay8U42qpaJk+kuNa3C+spxapWpM+ywhcgtw= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod 
h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/flatbuffers v25.2.10+incompatible h1:F3vclr7C3HpB1k9mxCGRMXq6FdUalZ6H/pNX4FP1v0Q= @@ -464,8 +465,8 @@ github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/go-tika v0.3.1 h1:l+jr10hDhZjcgxFRfcQChRLo1bPXQeLFluMyvDhXTTA= github.com/google/go-tika v0.3.1/go.mod h1:DJh5N8qxXIl85QkqmXknd+PeeRkUOTbvwyYf7ieDz6c= -github.com/google/go-tpm v0.9.6 h1:Ku42PT4LmjDu1H5C5ISWLlpI1mj+Zq7sPGKoRw2XROA= -github.com/google/go-tpm v0.9.6/go.mod h1:h9jEsEECg7gtLis0upRBQU+GhYVH6jMjrFxI8u6bVUY= +github.com/google/go-tpm v0.9.7 h1:u89J4tUUeDTlH8xxC3CTW7OHZjbjKoHdQ9W7gCUhtxA= +github.com/google/go-tpm v0.9.7/go.mod h1:h9jEsEECg7gtLis0upRBQU+GhYVH6jMjrFxI8u6bVUY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= @@ -479,8 +480,8 @@ github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 h1:BHT72Gu3keYf3ZEu2J0b1vyeLSOYI8bm5wbJM/8yDe8= github.com/google/pprof v0.0.0-20250403155104-27863c87afa6/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/renameio/v2 v2.0.0 h1:UifI23ZTGY8Tt29JbYFiuyIU3eX+RNFtUwefq9qAhxg= -github.com/google/renameio/v2 v2.0.0/go.mod h1:BtmJXm5YlszgC+TD4HOEEUFgkJP3nLxehU6hfe7jRt4= +github.com/google/renameio/v2 v2.0.1 h1:HyOM6qd9gF9sf15AvhbptGHUnaLTpEI9akAFFU3VyW0= +github.com/google/renameio/v2 v2.0.1/go.mod h1:BtmJXm5YlszgC+TD4HOEEUFgkJP3nLxehU6hfe7jRt4= 
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -575,11 +576,13 @@ github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4 github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= -github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= +github.com/klauspost/compress v1.18.2 h1:iiPHWW0YrcFgpBYhsA6D1+fqHssJscY/Tm/y2Uqnapk= +github.com/klauspost/compress v1.18.2/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4= github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.2.11 h1:0OwqZRYI2rFrjS4kvkDnqJkKHdHaRnCm68/DY4OxRzU= github.com/klauspost/cpuid/v2 v2.2.11/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= +github.com/klauspost/crc32 v1.3.0 h1:sSmTt3gUt81RP655XGZPElI0PelVTZ6YwCRnPSupoFM= +github.com/klauspost/crc32 v1.3.0/go.mod h1:D7kQaZhnkX/Y0tstFGf8VUzv2UofNGqCjnC3zdHB0Hw= github.com/kobergj/go-micro/v4 v4.0.0-20250610135441-d0b187215699 h1:3TOdtI6WPyvBB+uCykapjRtQX8vTHMlyhINzR+58B4k= github.com/kobergj/go-micro/v4 v4.0.0-20250610135441-d0b187215699/go.mod h1:eE/tD53n3KbVrzrWxKLxdkGw45Fg1qaNLWjpJMvIUF4= github.com/kobergj/gowebdav v0.0.0-20251030165916-532350997dde h1:HYcp4J4xYe2m9KSUVbTccJb14TpSs+ldCfDFgqsXedI= @@ -652,8 +655,8 @@ github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWE github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth 
v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc= github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= -github.com/mattn/go-sqlite3 v1.14.32 h1:JD12Ag3oLy1zQA+BNn74xRgaBbdhbNIDYvQUEuuErjs= -github.com/mattn/go-sqlite3 v1.14.32/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= +github.com/mattn/go-sqlite3 v1.14.33 h1:A5blZ5ulQo2AtayQ9/limgHEkFreKj1Dv226a1K73s0= +github.com/mattn/go-sqlite3 v1.14.33/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/maxymania/go-system v0.0.0-20170110133659-647cc364bf0b h1:Q53idHrTuQDDHyXaxZ6pUl0I9uyD6Z6uKFK3ocX6LzI= github.com/maxymania/go-system v0.0.0-20170110133659-647cc364bf0b/go.mod h1:KirJrATYGbTyUwVR26xIkaipRqRcMRXBf8N5dacvGus= @@ -665,14 +668,14 @@ github.com/miekg/dns v1.1.67 h1:kg0EHj0G4bfT5/oOys6HhZw4vmMlnoZ+gDu8tJ/AlI0= github.com/miekg/dns v1.1.67/go.mod h1:fujopn7TB3Pu3JM69XaawiU0wqjpL9/8xGop5UrTPps= github.com/mileusna/useragent v1.3.5 h1:SJM5NzBmh/hO+4LGeATKpaEX9+b4vcGg2qXGLiNGDws= github.com/mileusna/useragent v1.3.5/go.mod h1:3d8TOmwL/5I8pJjyVDteHtgDGcefrFUX4ccGOMKNYYc= -github.com/minio/crc64nvme v1.0.2 h1:6uO1UxGAD+kwqWWp7mBFsi5gAse66C4NXO8cmcVculg= -github.com/minio/crc64nvme v1.0.2/go.mod h1:eVfm2fAzLlxMdUGc0EEBGSMmPwmXD5XiNRpnu9J3bvg= -github.com/minio/highwayhash v1.0.3 h1:kbnuUMoHYyVl7szWjSxJnxw11k2U709jqFPPmIUyD6Q= -github.com/minio/highwayhash v1.0.3/go.mod h1:GGYsuwP/fPD6Y9hMiXuapVvlIUEhFhMTh0rxU3ik1LQ= +github.com/minio/crc64nvme v1.1.0 h1:e/tAguZ+4cw32D+IO/8GSf5UVr9y+3eJcxZI2WOO/7Q= +github.com/minio/crc64nvme v1.1.0/go.mod h1:eVfm2fAzLlxMdUGc0EEBGSMmPwmXD5XiNRpnu9J3bvg= +github.com/minio/highwayhash v1.0.4-0.20251030100505-070ab1a87a76 h1:KGuD/pM2JpL9FAYvBrnBBeENKZNh6eNtjqytV6TYjnk= +github.com/minio/highwayhash v1.0.4-0.20251030100505-070ab1a87a76/go.mod h1:GGYsuwP/fPD6Y9hMiXuapVvlIUEhFhMTh0rxU3ik1LQ= github.com/minio/md5-simd 
v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= -github.com/minio/minio-go/v7 v7.0.95 h1:ywOUPg+PebTMTzn9VDsoFJy32ZuARN9zhB+K3IYEvYU= -github.com/minio/minio-go/v7 v7.0.95/go.mod h1:wOOX3uxS334vImCNRVyIDdXX9OsXDm89ToynKgqUKlo= +github.com/minio/minio-go/v7 v7.0.97 h1:lqhREPyfgHTB/ciX8k2r8k0D93WaFqxbJX36UZq5occ= +github.com/minio/minio-go/v7 v7.0.97/go.mod h1:re5VXuo0pwEtoNLsNuSr0RrLfT/MBtohwdaSmPPSRSk= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= @@ -698,20 +701,20 @@ github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRW github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/nats-io/jwt/v2 v2.8.0 h1:K7uzyz50+yGZDO5o772eRE7atlcSEENpL7P+b74JV1g= github.com/nats-io/jwt/v2 v2.8.0/go.mod h1:me11pOkwObtcBNR8AiMrUbtVOUGkqYjMQZ6jnSdVUIA= -github.com/nats-io/nats-server/v2 v2.12.1 h1:0tRrc9bzyXEdBLcHr2XEjDzVpUxWx64aZBm7Rl1QDrA= -github.com/nats-io/nats-server/v2 v2.12.1/go.mod h1:OEaOLmu/2e6J9LzUt2OuGjgNem4EpYApO5Rpf26HDs8= -github.com/nats-io/nats.go v1.46.1 h1:bqQ2ZcxVd2lpYI97xYASeRTY3I5boe/IVmuUDPitHfo= -github.com/nats-io/nats.go v1.46.1/go.mod h1:iRWIPokVIFbVijxuMQq4y9ttaBTMe0SFdlZfMDd+33g= -github.com/nats-io/nkeys v0.4.11 h1:q44qGV008kYd9W1b1nEBkNzvnWxtRSQ7A8BoqRrcfa0= -github.com/nats-io/nkeys v0.4.11/go.mod h1:szDimtgmfOi9n25JpfIdGw12tZFYXqhGxjhVxsatHVE= +github.com/nats-io/nats-server/v2 v2.12.3 h1:KRv+1n7lddMVgkJPQer+pt36TcO0ENxjilBmeWdjcHs= +github.com/nats-io/nats-server/v2 v2.12.3/go.mod h1:MQXjG9WjyXKz9koWzUc3jYUMKD8x3CLmTNy91IQQz3Y= +github.com/nats-io/nats.go v1.48.0 h1:pSFyXApG+yWU/TgbKCjmm5K4wrHu86231/w84qRVR+U= 
+github.com/nats-io/nats.go v1.48.0/go.mod h1:iRWIPokVIFbVijxuMQq4y9ttaBTMe0SFdlZfMDd+33g= +github.com/nats-io/nkeys v0.4.12 h1:nssm7JKOG9/x4J8II47VWCL1Ds29avyiQDRn0ckMvDc= +github.com/nats-io/nkeys v0.4.12/go.mod h1:MT59A1HYcjIcyQDJStTfaOY6vhy9XTUjOFo+SVsvpBg= github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= -github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA= -github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= +github.com/oklog/run v1.2.0 h1:O8x3yXwah4A73hJdlrwo/2X6J62gE5qTMusH0dvz60E= +github.com/oklog/run v1.2.0/go.mod h1:mgDbKRSwPhJfesJ4PntqFUbKQRZ50NgmZTSPlFA0YFk= github.com/olekukonko/errors v1.1.0 h1:RNuGIh15QdDenh+hNvKrJkmxxjV4hcS50Db478Ou5sM= github.com/olekukonko/errors v1.1.0/go.mod h1:ppzxA5jBKcO1vIpCXQ9ZqgDh8iwODz6OXIGKU8r5m4Y= github.com/olekukonko/ll v0.0.9 h1:Y+1YqDfVkqMWuEQMclsF9HUR5+a82+dxJuL1HHSRpxI= @@ -722,12 +725,12 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/ginkgo/v2 v2.27.2 h1:LzwLj0b89qtIy6SSASkzlNvX6WktqurSHwkk2ipF/Ns= -github.com/onsi/ginkgo/v2 v2.27.2/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo= +github.com/onsi/ginkgo/v2 v2.27.4 h1:fcEcQW/A++6aZAZQNUmNjvA9PSOzefMJBerHJ4t8v8Y= 
+github.com/onsi/ginkgo/v2 v2.27.4/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A= -github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k= +github.com/onsi/gomega v1.38.3 h1:eTX+W6dobAYfFeGC2PV6RwXRu/MyT+cQguijutvkpSM= +github.com/onsi/gomega v1.38.3/go.mod h1:ZCU1pkQcXDO5Sl9/VVEGlDyp+zm0m1cmeG5TOzLgdh4= github.com/open-policy-agent/opa v1.10.1 h1:haIvxZSPky8HLjRrvQwWAjCPLg8JDFSZMbbG4yyUHgY= github.com/open-policy-agent/opa v1.10.1/go.mod h1:7uPI3iRpOalJ0BhK6s1JALWPU9HvaV1XeBSSMZnr/PM= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= @@ -737,8 +740,8 @@ github.com/orcaman/concurrent-map v1.0.0 h1:I/2A2XPCb4IuQWcQhBhSwGfiuybl/J0ev9HD github.com/orcaman/concurrent-map v1.0.0/go.mod h1:Lu3tH6HLW3feq74c2GC+jIMS/K2CFcDWnWD9XkenwhI= github.com/owncloud/libre-graph-api-go v1.0.5-0.20251107084958-31937a4ea3f1 h1:uW3BUPdaAhti2aP8x3Vb79vzmqAgDaWZ0yrW+4ujjU8= github.com/owncloud/libre-graph-api-go v1.0.5-0.20251107084958-31937a4ea3f1/go.mod h1:z61VMGAJRtR1nbgXWiNoCkxUXP1B3Je9rMuJbnGd+Og= -github.com/owncloud/reva/v2 v2.0.0-20251107154850-a122a9538794 h1:j5IbfxSCcnMaIi8yei//NsXyHTEsJRz5tJSPnNcCERA= -github.com/owncloud/reva/v2 v2.0.0-20251107154850-a122a9538794/go.mod h1:3PTzYZopTiFJ6DWdZ19HP3iHh2I3Gav0cDNFQ1gMT6g= +github.com/owncloud/reva/v2 v2.0.0-20260116122933-81e6e21256eb h1:G4YhVvtdJDEE0fhV0sWMUDpcpV5WqQNdfLw9xNMfkFI= +github.com/owncloud/reva/v2 v2.0.0-20260116122933-81e6e21256eb/go.mod h1:nDBXQivlwd4Vf5UD/oDT0RakSXy0GJPI1LsyHMm5IpI= github.com/oxtoacart/bpool v0.0.0-20190530202638-03653db5a59c h1:rp5dCmg/yLR3mgFuSOe4oEnDDmGLROTvMragMUXpTQw= github.com/oxtoacart/bpool v0.0.0-20190530202638-03653db5a59c/go.mod 
h1:X07ZCGwUbLaax7L0S3Tw4hpejzu63ZrrQiUe6W0hcy0= github.com/pablodz/inotifywaitgo v0.0.9 h1:njquRbBU7fuwIe5rEvtaniVBjwWzcpdUVptSgzFqZsw= @@ -766,8 +769,8 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pquerna/cachecontrol v0.2.0 h1:vBXSNuE5MYP9IJ5kjsdo8uq+w41jSPgvba2DEnkRx9k= github.com/pquerna/cachecontrol v0.2.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI= -github.com/prometheus/alertmanager v0.28.1 h1:BK5pCoAtaKg01BYRUJhEDV1tqJMEtYBGzPw8QdvnnvA= -github.com/prometheus/alertmanager v0.28.1/go.mod h1:0StpPUDDHi1VXeM7p2yYfeZgLVi/PPlt39vo9LQUHxM= +github.com/prometheus/alertmanager v0.30.0 h1:E4dnxSFXK8V2Bb8iqudlisTmaIrF3hRJSWnliG08tBM= +github.com/prometheus/alertmanager v0.30.0/go.mod h1:93PBumcTLr/gNtNtM0m7BcCffbvYP5bKuLBWiOnISaA= github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= @@ -792,8 +795,8 @@ github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9 github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.35.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= -github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs= -github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA= +github.com/prometheus/common v0.67.4 h1:yR3NqWO1/UyO1w2PhUvXlGQs/PtFmoveVO0KZ4+Lvsc= +github.com/prometheus/common v0.67.4/go.mod h1:gP0fq6YjjNCLssJCQp0yk4M8W6ikLURwkdd/YKtTbyI= github.com/prometheus/procfs 
v0.0.0-20170703101242-e645f4e5aaa8/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -840,8 +843,8 @@ github.com/sergi/go-diff v1.4.0 h1:n/SP9D5ad1fORl+llWyN+D6qoUETXNZARKjyY2/KVCw= github.com/sergi/go-diff v1.4.0/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= github.com/sethvargo/go-password v0.3.1 h1:WqrLTjo7X6AcVYfC6R7GtSyuUQR9hGyAj/f1PYQZCJU= github.com/sethvargo/go-password v0.3.1/go.mod h1:rXofC1zT54N7R8K/h1WDUdkf9BOx5OptoxrMBcrXzvs= -github.com/shamaton/msgpack/v2 v2.3.1 h1:R3QNLIGA/tbdczNMZ5PCRxrXvy+fnzsIaHG4kKMgWYo= -github.com/shamaton/msgpack/v2 v2.3.1/go.mod h1:6khjYnkx73f7VQU7wjcFS9DFjs+59naVWJv1TB7qdOI= +github.com/shamaton/msgpack/v2 v2.4.0 h1:O5Z08MRmbo0lA9o2xnQ4TXx6teJbPqEurqcCOQ8Oi/4= +github.com/shamaton/msgpack/v2 v2.4.0/go.mod h1:6khjYnkx73f7VQU7wjcFS9DFjs+59naVWJv1TB7qdOI= github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI= github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c h1:aqg5Vm5dwtvL+YgDpBcK1ITf3o96N/K7/wsRXQnUTEs= @@ -948,12 +951,12 @@ github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= go.etcd.io/bbolt v1.4.2 h1:IrUHp260R8c+zYx/Tm8QZr04CX+qWS5PGfPdevhdm1I= go.etcd.io/bbolt v1.4.2/go.mod h1:Is8rSHO/b4f3XigBC0lL0+4FwAQv3HXEEIgFMuKHceM= -go.etcd.io/etcd/api/v3 v3.6.5 h1:pMMc42276sgR1j1raO/Qv3QI9Af/AuyQUW6CBAWuntA= -go.etcd.io/etcd/api/v3 v3.6.5/go.mod h1:ob0/oWA/UQQlT1BmaEkWQzI0sJ1M0Et0mMpaABxguOQ= -go.etcd.io/etcd/client/pkg/v3 v3.6.5 h1:Duz9fAzIZFhYWgRjp/FgNq2gO1jId9Yae/rLn3RrBP8= -go.etcd.io/etcd/client/pkg/v3 v3.6.5/go.mod 
h1:8Wx3eGRPiy0qOFMZT/hfvdos+DjEaPxdIDiCDUv/FQk= -go.etcd.io/etcd/client/v3 v3.6.5 h1:yRwZNFBx/35VKHTcLDeO7XVLbCBFbPi+XV4OC3QJf2U= -go.etcd.io/etcd/client/v3 v3.6.5/go.mod h1:ZqwG/7TAFZ0BJ0jXRPoJjKQJtbFo/9NIY8uoFFKcCyo= +go.etcd.io/etcd/api/v3 v3.6.7 h1:7BNJ2gQmc3DNM+9cRkv7KkGQDayElg8x3X+tFDYS+E0= +go.etcd.io/etcd/api/v3 v3.6.7/go.mod h1:xJ81TLj9hxrYYEDmXTeKURMeY3qEDN24hqe+q7KhbnI= +go.etcd.io/etcd/client/pkg/v3 v3.6.7 h1:vvzgyozz46q+TyeGBuFzVuI53/yd133CHceNb/AhBVs= +go.etcd.io/etcd/client/pkg/v3 v3.6.7/go.mod h1:2IVulJ3FZ/czIGl9T4lMF1uxzrhRahLqe+hSgy+Kh7Q= +go.etcd.io/etcd/client/v3 v3.6.7 h1:9WqA5RpIBtdMxAy1ukXLAdtg2pAxNqW5NUoO2wQrE6U= +go.etcd.io/etcd/client/v3 v3.6.7/go.mod h1:2XfROY56AXnUqGsvl+6k29wrwsSbEh1lAouQB1vHpeE= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -962,16 +965,16 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= -go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0 h1:YH4g8lQroajqUwWbq/tr2QX1JFmEXaDLgG+ew9bLMWo= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0/go.mod h1:fvPi2qXDqFs8M4B4fmJhE92TyQs9Ydjlg3RvfUp+NbQ= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= 
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.64.0 h1:RN3ifU8y4prNWeEnQp2kRRHz8UwonAEYZl8tUzHEXAk= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.64.0/go.mod h1:habDz3tEWiFANTo6oUE99EmaFUrCNYAAg3wiVmusm70= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 h1:RbKq8BG0FI8OiXhBfcRtqqHcZcka+gU3cskNuf05R18= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0/go.mod h1:h06DGIukJOevXaj/xrNjhi/2098RZzcLTbc0jDAUbsg= go.opentelemetry.io/contrib/zpages v0.63.0 h1:TppOKuZGbqXMgsfjqq3i09N5Vbo1JLtLImUqiTPGnX4= go.opentelemetry.io/contrib/zpages v0.63.0/go.mod h1:5F8uugz75ay/MMhRRhxAXY33FuaI8dl7jTxefrIy5qk= -go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8= -go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM= +go.opentelemetry.io/otel v1.39.0 h1:8yPrr/S0ND9QEfTfdP9V+SiwT4E0G7Y5MO7p85nis48= +go.opentelemetry.io/otel v1.39.0/go.mod h1:kLlFTywNWrFyEdH0oj2xK0bFYZtHRYUdv1NklR/tgc8= go.opentelemetry.io/otel/exporters/jaeger v1.17.0 h1:D7UpUy2Xc2wsi1Ras6V40q806WM07rqoCWzXu7Sqy+4= go.opentelemetry.io/otel/exporters/jaeger v1.17.0/go.mod h1:nPCqOnEH9rNLKqH/+rrUjiMzHJdV1BlpKcTwRTyKkKI= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 h1:GqRJVj7UmLjCVyVJ3ZFLdPRmhDUp2zFmQe3RHIOsw24= @@ -980,14 +983,14 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 h1:lwI4D go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0/go.mod h1:Kz/oCE7z5wuyhPxsXDuaPteSWqjSBD5YaSdbxZYGbGk= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0 h1:aTL7F04bJHUlztTsNGJ2l+6he8c+y/b//eR0jjjemT4= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0/go.mod h1:kldtb7jDTeol0l3ewcmd8SDvx3EmIE7lyvqbasU3QC4= -go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA= -go.opentelemetry.io/otel/metric v1.38.0/go.mod 
h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI= -go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E= -go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg= -go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM= -go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA= -go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE= -go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs= +go.opentelemetry.io/otel/metric v1.39.0 h1:d1UzonvEZriVfpNKEVmHXbdf909uGTOQjA0HF0Ls5Q0= +go.opentelemetry.io/otel/metric v1.39.0/go.mod h1:jrZSWL33sD7bBxg1xjrqyDjnuzTUB0x1nBERXd7Ftcs= +go.opentelemetry.io/otel/sdk v1.39.0 h1:nMLYcjVsvdui1B/4FRkwjzoRVsMK8uL/cj0OyhKzt18= +go.opentelemetry.io/otel/sdk v1.39.0/go.mod h1:vDojkC4/jsTJsE+kh+LXYQlbL8CgrEcwmt1ENZszdJE= +go.opentelemetry.io/otel/sdk/metric v1.39.0 h1:cXMVVFVgsIf2YL6QkRF4Urbr/aMInf+2WKg+sEJTtB8= +go.opentelemetry.io/otel/sdk/metric v1.39.0/go.mod h1:xq9HEVH7qeX69/JnwEfp6fVq5wosJsY1mt4lLfYdVew= +go.opentelemetry.io/otel/trace v1.39.0 h1:2d2vfpEDmCJ5zVYz7ijaJdOF59xLomrvj7bjt6/qCJI= +go.opentelemetry.io/otel/trace v1.39.0/go.mod h1:88w4/PnZSazkGzz/w84VHpQafiU4EtqqlVdxWy+rNOA= go.opentelemetry.io/proto/otlp v1.7.1 h1:gTOMpGDb0WTBOP8JaO72iL3auEZhVmAQg4ipjOVAtj4= go.opentelemetry.io/proto/otlp v1.7.1/go.mod h1:b2rVh6rfI/s2pHWNlB7ILJcRALpcNDzKhACevjI+ZnE= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= @@ -1000,8 +1003,8 @@ go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN8 go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= -go.yaml.in/yaml/v2 v2.4.2 
h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI= -go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU= +go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0= +go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8= go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -1017,8 +1020,8 @@ golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= -golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q= -golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4= +golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU= +golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1060,8 +1063,8 @@ golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA= -golang.org/x/mod v0.29.0/go.mod 
h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w= +golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk= +golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1135,8 +1138,8 @@ golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I= -golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= +golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1229,8 +1232,8 @@ golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= -golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM= -golang.org/x/text v0.31.0/go.mod 
h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM= +golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU= +golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1285,8 +1288,8 @@ golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= -golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ= -golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs= +golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ= +golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ= golang.org/x/tools/godoc v0.1.0-deprecated h1:o+aZ1BOj6Hsx/GBdJO/s815sqftjSnrZZwyYTHODvtk= golang.org/x/tools/godoc v0.1.0-deprecated/go.mod h1:qM63CriJ961IHWmnWa9CjZnBndniPt4a3CK0PVB9bIg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1349,10 +1352,10 @@ google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20250303144028-a0af3efb3deb h1:ITgPrl429bc6+2ZraNSzMDk3I95nmQln2fuPstKwFDE= google.golang.org/genproto v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:sAo5UzpjUwgFBCzupwhcLcxHVDK7vG5IqI30YnwX2eE= 
-google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 h1:BIRfGDEjiHRrk0QKZe3Xv2ieMhtgRGeLcZQ0mIVn4EY= -google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5/go.mod h1:j3QtIyytwqGr1JUDtYXwtMXWPKsEa5LtzIFN1Wn5WvE= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 h1:eaY8u2EuxbRv7c3NiGK0/NedzVsCcV6hDuU5qPX5EGE= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5/go.mod h1:M4/wBTSeyLxupu3W3tJtOgB14jILAS/XWPSSa3TAlJc= +google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8 h1:mepRgnBZa07I4TRuomDE4sTIYieg/osKmzIf4USdWS4= +google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8/go.mod h1:fDMmzKV90WSg1NbozdqrE64fkuTv6mlq2zxo9ad+3yo= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217 h1:gRkg/vSppuSQoDjxyiGfN4Upv/h/DQmIR10ZU8dh4Ww= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -1366,8 +1369,8 @@ google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.76.0 h1:UnVkv1+uMLYXoIz6o7chp59WfQUYA2ex/BXQ9rHZu7A= -google.golang.org/grpc v1.76.0/go.mod h1:Ju12QI8M6iQJtbcsV+awF5a4hfJMLi4X0JLo94ULZ6c= +google.golang.org/grpc v1.77.0 h1:wVVY6/8cGA6vvffn+wWK5ToddbgdU3d8MNENr4evgXM= +google.golang.org/grpc v1.77.0/go.mod h1:z0BY1iVj0q8E1uSQCjL9cppRj+gnZjzDnzV0dHhrNig= 
google.golang.org/grpc/examples v0.0.0-20211102180624-670c133e568e h1:m7aQHHqd0q89mRwhwS9Bx2rjyl/hsFAeta+uGrHsQaU= google.golang.org/grpc/examples v0.0.0-20211102180624-670c133e568e/go.mod h1:gID3PKrg7pWKntu9Ss6zTLJ0ttC0X9IHgREOCZwbCVU= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= @@ -1384,8 +1387,8 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE= -google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= +google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE= +google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/cenkalti/backoff.v1 v1.1.0 h1:Arh75ttbsvlpVA7WtVpH4u9h6Zl46xuptxqLxPiSo4Y= gopkg.in/cenkalti/backoff.v1 v1.1.0/go.mod h1:J6Vskwqd+OMVJl8C33mmtxTBs2gyzfv7UDAkHu8BrjI= diff --git a/vendor/github.com/BurntSushi/toml/README.md b/vendor/github.com/BurntSushi/toml/README.md index 235496eeb29..1101d206d40 100644 --- a/vendor/github.com/BurntSushi/toml/README.md +++ b/vendor/github.com/BurntSushi/toml/README.md @@ -1,7 +1,7 @@ TOML stands for Tom's Obvious, Minimal Language. This Go package provides a reflection interface similar to Go's standard library `json` and `xml` packages. -Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0). +Compatible with TOML version [v1.1.0](https://toml.io/en/v1.1.0). 
Documentation: https://pkg.go.dev/github.com/BurntSushi/toml diff --git a/vendor/github.com/BurntSushi/toml/decode.go b/vendor/github.com/BurntSushi/toml/decode.go index 3fa516caa20..ed884840fb4 100644 --- a/vendor/github.com/BurntSushi/toml/decode.go +++ b/vendor/github.com/BurntSushi/toml/decode.go @@ -206,6 +206,13 @@ func markDecodedRecursive(md *MetaData, tmap map[string]any) { markDecodedRecursive(md, tmap) md.context = md.context[0 : len(md.context)-1] } + if tarr, ok := tmap[key].([]map[string]any); ok { + for _, elm := range tarr { + md.context = append(md.context, key) + markDecodedRecursive(md, elm) + md.context = md.context[0 : len(md.context)-1] + } + } } } @@ -423,7 +430,7 @@ func (md *MetaData) unifyString(data any, rv reflect.Value) error { if i, ok := data.(int64); ok { rv.SetString(strconv.FormatInt(i, 10)) } else if f, ok := data.(float64); ok { - rv.SetString(strconv.FormatFloat(f, 'f', -1, 64)) + rv.SetString(strconv.FormatFloat(f, 'g', -1, 64)) } else { return md.badtype("string", data) } diff --git a/vendor/github.com/BurntSushi/toml/encode.go b/vendor/github.com/BurntSushi/toml/encode.go index ac196e7df88..bd7aa18655d 100644 --- a/vendor/github.com/BurntSushi/toml/encode.go +++ b/vendor/github.com/BurntSushi/toml/encode.go @@ -228,9 +228,9 @@ func (enc *Encoder) eElement(rv reflect.Value) { } switch v.Location() { default: - enc.wf(v.Format(format)) + enc.write(v.Format(format)) case internal.LocalDatetime, internal.LocalDate, internal.LocalTime: - enc.wf(v.In(time.UTC).Format(format)) + enc.write(v.In(time.UTC).Format(format)) } return case Marshaler: @@ -279,40 +279,40 @@ func (enc *Encoder) eElement(rv reflect.Value) { case reflect.String: enc.writeQuoted(rv.String()) case reflect.Bool: - enc.wf(strconv.FormatBool(rv.Bool())) + enc.write(strconv.FormatBool(rv.Bool())) case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - enc.wf(strconv.FormatInt(rv.Int(), 10)) + enc.write(strconv.FormatInt(rv.Int(), 10)) case 
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - enc.wf(strconv.FormatUint(rv.Uint(), 10)) + enc.write(strconv.FormatUint(rv.Uint(), 10)) case reflect.Float32: f := rv.Float() if math.IsNaN(f) { if math.Signbit(f) { - enc.wf("-") + enc.write("-") } - enc.wf("nan") + enc.write("nan") } else if math.IsInf(f, 0) { if math.Signbit(f) { - enc.wf("-") + enc.write("-") } - enc.wf("inf") + enc.write("inf") } else { - enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 32))) + enc.write(floatAddDecimal(strconv.FormatFloat(f, 'g', -1, 32))) } case reflect.Float64: f := rv.Float() if math.IsNaN(f) { if math.Signbit(f) { - enc.wf("-") + enc.write("-") } - enc.wf("nan") + enc.write("nan") } else if math.IsInf(f, 0) { if math.Signbit(f) { - enc.wf("-") + enc.write("-") } - enc.wf("inf") + enc.write("inf") } else { - enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 64))) + enc.write(floatAddDecimal(strconv.FormatFloat(f, 'g', -1, 64))) } case reflect.Array, reflect.Slice: enc.eArrayOrSliceElement(rv) @@ -330,27 +330,32 @@ func (enc *Encoder) eElement(rv reflect.Value) { // By the TOML spec, all floats must have a decimal with at least one number on // either side. func floatAddDecimal(fstr string) string { - if !strings.Contains(fstr, ".") { - return fstr + ".0" + for _, c := range fstr { + if c == 'e' { // Exponent syntax + return fstr + } + if c == '.' 
{ + return fstr + } } - return fstr + return fstr + ".0" } func (enc *Encoder) writeQuoted(s string) { - enc.wf("\"%s\"", dblQuotedReplacer.Replace(s)) + enc.write(`"` + dblQuotedReplacer.Replace(s) + `"`) } func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) { length := rv.Len() - enc.wf("[") + enc.write("[") for i := 0; i < length; i++ { elem := eindirect(rv.Index(i)) enc.eElement(elem) if i != length-1 { - enc.wf(", ") + enc.write(", ") } } - enc.wf("]") + enc.write("]") } func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) { @@ -363,7 +368,7 @@ func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) { continue } enc.newline() - enc.wf("%s[[%s]]", enc.indentStr(key), key) + enc.writef("%s[[%s]]", enc.indentStr(key), key) enc.newline() enc.eMapOrStruct(key, trv, false) } @@ -376,7 +381,7 @@ func (enc *Encoder) eTable(key Key, rv reflect.Value) { enc.newline() } if len(key) > 0 { - enc.wf("%s[%s]", enc.indentStr(key), key) + enc.writef("%s[%s]", enc.indentStr(key), key) enc.newline() } enc.eMapOrStruct(key, rv, false) @@ -422,7 +427,7 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) { if inline { enc.writeKeyValue(Key{mapKey.String()}, val, true) if trailC || i != len(mapKeys)-1 { - enc.wf(", ") + enc.write(", ") } } else { enc.encode(key.add(mapKey.String()), val) @@ -431,12 +436,12 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) { } if inline { - enc.wf("{") + enc.write("{") } writeMapKeys(mapKeysDirect, len(mapKeysSub) > 0) writeMapKeys(mapKeysSub, false) if inline { - enc.wf("}") + enc.write("}") } } @@ -534,7 +539,7 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) { if inline { enc.writeKeyValue(Key{keyName}, fieldVal, true) if fieldIndex[0] != totalFields-1 { - enc.wf(", ") + enc.write(", ") } } else { enc.encode(key.add(keyName), fieldVal) @@ -543,14 +548,14 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) { } if inline { - enc.wf("{") + enc.write("{") 
} l := len(fieldsDirect) + len(fieldsSub) writeFields(fieldsDirect, l) writeFields(fieldsSub, l) if inline { - enc.wf("}") + enc.write("}") } } @@ -700,7 +705,7 @@ func isEmpty(rv reflect.Value) bool { func (enc *Encoder) newline() { if enc.hasWritten { - enc.wf("\n") + enc.write("\n") } } @@ -722,14 +727,22 @@ func (enc *Encoder) writeKeyValue(key Key, val reflect.Value, inline bool) { enc.eElement(val) return } - enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1)) + enc.writef("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1)) enc.eElement(val) if !inline { enc.newline() } } -func (enc *Encoder) wf(format string, v ...any) { +func (enc *Encoder) write(s string) { + _, err := enc.w.WriteString(s) + if err != nil { + encPanic(err) + } + enc.hasWritten = true +} + +func (enc *Encoder) writef(format string, v ...any) { _, err := fmt.Fprintf(enc.w, format, v...) if err != nil { encPanic(err) diff --git a/vendor/github.com/BurntSushi/toml/lex.go b/vendor/github.com/BurntSushi/toml/lex.go index 1c3b4770293..9f4396a0f75 100644 --- a/vendor/github.com/BurntSushi/toml/lex.go +++ b/vendor/github.com/BurntSushi/toml/lex.go @@ -13,7 +13,6 @@ type itemType int const ( itemError itemType = iota - itemNIL // used in the parser to indicate no type itemEOF itemText itemString @@ -47,14 +46,13 @@ func (p Position) String() string { } type lexer struct { - input string - start int - pos int - line int - state stateFn - items chan item - tomlNext bool - esc bool + input string + start int + pos int + line int + state stateFn + items chan item + esc bool // Allow for backing up up to 4 runes. This is necessary because TOML // contains 3-rune tokens (""" and '''). 
@@ -90,14 +88,13 @@ func (lx *lexer) nextItem() item { } } -func lex(input string, tomlNext bool) *lexer { +func lex(input string) *lexer { lx := &lexer{ - input: input, - state: lexTop, - items: make(chan item, 10), - stack: make([]stateFn, 0, 10), - line: 1, - tomlNext: tomlNext, + input: input, + state: lexTop, + items: make(chan item, 10), + stack: make([]stateFn, 0, 10), + line: 1, } return lx } @@ -108,7 +105,7 @@ func (lx *lexer) push(state stateFn) { func (lx *lexer) pop() stateFn { if len(lx.stack) == 0 { - return lx.errorf("BUG in lexer: no states to pop") + panic("BUG in lexer: no states to pop") } last := lx.stack[len(lx.stack)-1] lx.stack = lx.stack[0 : len(lx.stack)-1] @@ -305,6 +302,8 @@ func lexTop(lx *lexer) stateFn { return lexTableStart case eof: if lx.pos > lx.start { + // TODO: never reached? I think this can only occur on a bug in the + // lexer(?) return lx.errorf("unexpected EOF") } lx.emit(itemEOF) @@ -392,8 +391,6 @@ func lexTableNameStart(lx *lexer) stateFn { func lexTableNameEnd(lx *lexer) stateFn { lx.skip(isWhitespace) switch r := lx.next(); { - case isWhitespace(r): - return lexTableNameEnd case r == '.': lx.ignore() return lexTableNameStart @@ -412,7 +409,7 @@ func lexTableNameEnd(lx *lexer) stateFn { // Lexes only one part, e.g. only 'a' inside 'a.b'. func lexBareName(lx *lexer) stateFn { r := lx.next() - if isBareKeyChar(r, lx.tomlNext) { + if isBareKeyChar(r) { return lexBareName } lx.backup() @@ -420,23 +417,23 @@ func lexBareName(lx *lexer) stateFn { return lx.pop() } -// lexBareName lexes one part of a key or table. -// -// It assumes that at least one valid character for the table has already been -// read. +// lexQuotedName lexes one part of a quoted key or table name. It assumes that +// it starts lexing at the quote itself (" or '). // // Lexes only one part, e.g. only '"a"' inside '"a".b'. 
func lexQuotedName(lx *lexer) stateFn { r := lx.next() switch { - case isWhitespace(r): - return lexSkip(lx, lexValue) case r == '"': lx.ignore() // ignore the '"' return lexString case r == '\'': lx.ignore() // ignore the "'" return lexRawString + + // TODO: I don't think any of the below conditions can ever be reached? + case isWhitespace(r): + return lexSkip(lx, lexValue) case r == eof: return lx.errorf("unexpected EOF; expected value") default: @@ -464,17 +461,19 @@ func lexKeyStart(lx *lexer) stateFn { func lexKeyNameStart(lx *lexer) stateFn { lx.skip(isWhitespace) switch r := lx.peek(); { - case r == '=' || r == eof: - return lx.errorf("unexpected '='") - case r == '.': - return lx.errorf("unexpected '.'") + default: + lx.push(lexKeyEnd) + return lexBareName case r == '"' || r == '\'': lx.ignore() lx.push(lexKeyEnd) return lexQuotedName - default: - lx.push(lexKeyEnd) - return lexBareName + + // TODO: I think these can never be reached? + case r == '=' || r == eof: + return lx.errorf("unexpected '='") + case r == '.': + return lx.errorf("unexpected '.'") } } @@ -485,7 +484,7 @@ func lexKeyEnd(lx *lexer) stateFn { switch r := lx.next(); { case isWhitespace(r): return lexSkip(lx, lexKeyEnd) - case r == eof: + case r == eof: // TODO: never reached return lx.errorf("unexpected EOF; expected key separator '='") case r == '.': lx.ignore() @@ -628,10 +627,7 @@ func lexInlineTableValue(lx *lexer) stateFn { case isWhitespace(r): return lexSkip(lx, lexInlineTableValue) case isNL(r): - if lx.tomlNext { - return lexSkip(lx, lexInlineTableValue) - } - return lx.errorPrevLine(errLexInlineTableNL{}) + return lexSkip(lx, lexInlineTableValue) case r == '#': lx.push(lexInlineTableValue) return lexCommentStart @@ -653,10 +649,7 @@ func lexInlineTableValueEnd(lx *lexer) stateFn { case isWhitespace(r): return lexSkip(lx, lexInlineTableValueEnd) case isNL(r): - if lx.tomlNext { - return lexSkip(lx, lexInlineTableValueEnd) - } - return lx.errorPrevLine(errLexInlineTableNL{}) + 
return lexSkip(lx, lexInlineTableValueEnd) case r == '#': lx.push(lexInlineTableValueEnd) return lexCommentStart @@ -664,10 +657,7 @@ func lexInlineTableValueEnd(lx *lexer) stateFn { lx.ignore() lx.skip(isWhitespace) if lx.peek() == '}' { - if lx.tomlNext { - return lexInlineTableValueEnd - } - return lx.errorf("trailing comma not allowed in inline tables") + return lexInlineTableValueEnd } return lexInlineTableValue case r == '}': @@ -855,9 +845,6 @@ func lexStringEscape(lx *lexer) stateFn { r := lx.next() switch r { case 'e': - if !lx.tomlNext { - return lx.error(errLexEscape{r}) - } fallthrough case 'b': fallthrough @@ -878,9 +865,6 @@ func lexStringEscape(lx *lexer) stateFn { case '\\': return lx.pop() case 'x': - if !lx.tomlNext { - return lx.error(errLexEscape{r}) - } return lexHexEscape case 'u': return lexShortUnicodeEscape @@ -928,19 +912,9 @@ func lexLongUnicodeEscape(lx *lexer) stateFn { // lexBaseNumberOrDate can differentiate base prefixed integers from other // types. func lexNumberOrDateStart(lx *lexer) stateFn { - r := lx.next() - switch r { - case '0': + if lx.next() == '0' { return lexBaseNumberOrDate } - - if !isDigit(r) { - // The only way to reach this state is if the value starts - // with a digit, so specifically treat anything else as an - // error. 
- return lx.errorf("expected a digit but got %q", r) - } - return lexNumberOrDate } @@ -1196,13 +1170,13 @@ func lexSkip(lx *lexer, nextState stateFn) stateFn { } func (s stateFn) String() string { + if s == nil { + return "" + } name := runtime.FuncForPC(reflect.ValueOf(s).Pointer()).Name() if i := strings.LastIndexByte(name, '.'); i > -1 { name = name[i+1:] } - if s == nil { - name = "" - } return name + "()" } @@ -1210,8 +1184,6 @@ func (itype itemType) String() string { switch itype { case itemError: return "Error" - case itemNIL: - return "NIL" case itemEOF: return "EOF" case itemText: @@ -1226,18 +1198,22 @@ func (itype itemType) String() string { return "Float" case itemDatetime: return "DateTime" - case itemTableStart: - return "TableStart" - case itemTableEnd: - return "TableEnd" - case itemKeyStart: - return "KeyStart" - case itemKeyEnd: - return "KeyEnd" case itemArray: return "Array" case itemArrayEnd: return "ArrayEnd" + case itemTableStart: + return "TableStart" + case itemTableEnd: + return "TableEnd" + case itemArrayTableStart: + return "ArrayTableStart" + case itemArrayTableEnd: + return "ArrayTableEnd" + case itemKeyStart: + return "KeyStart" + case itemKeyEnd: + return "KeyEnd" case itemCommentStart: return "CommentStart" case itemInlineTableStart: @@ -1266,7 +1242,7 @@ func isDigit(r rune) bool { return r >= '0' && r <= '9' } func isBinary(r rune) bool { return r == '0' || r == '1' } func isOctal(r rune) bool { return r >= '0' && r <= '7' } func isHex(r rune) bool { return (r >= '0' && r <= '9') || (r|0x20 >= 'a' && r|0x20 <= 'f') } -func isBareKeyChar(r rune, tomlNext bool) bool { +func isBareKeyChar(r rune) bool { return (r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') || (r >= '0' && r <= '9') || r == '_' || r == '-' } diff --git a/vendor/github.com/BurntSushi/toml/parse.go b/vendor/github.com/BurntSushi/toml/parse.go index e3ea8a9a2d2..b474247ae2f 100644 --- a/vendor/github.com/BurntSushi/toml/parse.go +++ 
b/vendor/github.com/BurntSushi/toml/parse.go @@ -3,7 +3,6 @@ package toml import ( "fmt" "math" - "os" "strconv" "strings" "time" @@ -17,7 +16,6 @@ type parser struct { context Key // Full key for the current hash in scope. currentKey string // Base key name for everything except hashes. pos Position // Current position in the TOML file. - tomlNext bool ordered []Key // List of keys in the order that they appear in the TOML data. @@ -32,8 +30,6 @@ type keyInfo struct { } func parse(data string) (p *parser, err error) { - _, tomlNext := os.LookupEnv("BURNTSUSHI_TOML_110") - defer func() { if r := recover(); r != nil { if pErr, ok := r.(ParseError); ok { @@ -73,10 +69,9 @@ func parse(data string) (p *parser, err error) { p = &parser{ keyInfo: make(map[string]keyInfo), mapping: make(map[string]any), - lx: lex(data, tomlNext), + lx: lex(data), ordered: make([]Key, 0), implicits: make(map[string]struct{}), - tomlNext: tomlNext, } for { item := p.next() @@ -350,17 +345,14 @@ func (p *parser) valueFloat(it item) (any, tomlType) { var dtTypes = []struct { fmt string zone *time.Location - next bool }{ - {time.RFC3339Nano, time.Local, false}, - {"2006-01-02T15:04:05.999999999", internal.LocalDatetime, false}, - {"2006-01-02", internal.LocalDate, false}, - {"15:04:05.999999999", internal.LocalTime, false}, - - // tomlNext - {"2006-01-02T15:04Z07:00", time.Local, true}, - {"2006-01-02T15:04", internal.LocalDatetime, true}, - {"15:04", internal.LocalTime, true}, + {time.RFC3339Nano, time.Local}, + {"2006-01-02T15:04:05.999999999", internal.LocalDatetime}, + {"2006-01-02", internal.LocalDate}, + {"15:04:05.999999999", internal.LocalTime}, + {"2006-01-02T15:04Z07:00", time.Local}, + {"2006-01-02T15:04", internal.LocalDatetime}, + {"15:04", internal.LocalTime}, } func (p *parser) valueDatetime(it item) (any, tomlType) { @@ -371,9 +363,6 @@ func (p *parser) valueDatetime(it item) (any, tomlType) { err error ) for _, dt := range dtTypes { - if dt.next && !p.tomlNext { - continue - } 
t, err = time.ParseInLocation(dt.fmt, it.val, dt.zone) if err == nil { if missingLeadingZero(it.val, dt.fmt) { @@ -644,6 +633,11 @@ func (p *parser) setValue(key string, value any) { // Note that since it has already been defined (as a hash), we don't // want to overwrite it. So our business is done. if p.isArray(keyContext) { + if !p.isImplicit(keyContext) { + if _, ok := hash[key]; ok { + p.panicf("Key '%s' has already been defined.", keyContext) + } + } p.removeImplicit(keyContext) hash[key] = value return @@ -802,10 +796,8 @@ func (p *parser) replaceEscapes(it item, str string) string { b.WriteByte(0x0d) skip = 1 case 'e': - if p.tomlNext { - b.WriteByte(0x1b) - skip = 1 - } + b.WriteByte(0x1b) + skip = 1 case '"': b.WriteByte(0x22) skip = 1 @@ -815,11 +807,9 @@ func (p *parser) replaceEscapes(it item, str string) string { // The lexer guarantees the correct number of characters are present; // don't need to check here. case 'x': - if p.tomlNext { - escaped := p.asciiEscapeToUnicode(it, str[i+2:i+4]) - b.WriteRune(escaped) - skip = 3 - } + escaped := p.asciiEscapeToUnicode(it, str[i+2:i+4]) + b.WriteRune(escaped) + skip = 3 case 'u': escaped := p.asciiEscapeToUnicode(it, str[i+2:i+6]) b.WriteRune(escaped) diff --git a/vendor/github.com/KimMachineGun/automemlimit/memlimit/cgroups.go b/vendor/github.com/KimMachineGun/automemlimit/memlimit/cgroups.go index 81559e3bf95..2f0a8404373 100644 --- a/vendor/github.com/KimMachineGun/automemlimit/memlimit/cgroups.go +++ b/vendor/github.com/KimMachineGun/automemlimit/memlimit/cgroups.go @@ -103,8 +103,8 @@ func getMemoryLimitV2(chs []cgroupHierarchy, mis []mountInfo) (uint64, error) { return 0, err } - // retrieve the memory limit from the memory.max file - return readMemoryLimitV2FromPath(filepath.Join(cgroupPath, "memory.max")) + // retrieve the memory limit from the memory.max recursively. 
+ return walkCgroupV2Hierarchy(cgroupPath, mountPoint) } // readMemoryLimitV2FromPath reads the memory limit for cgroup v2 from the given path. @@ -131,6 +131,39 @@ func readMemoryLimitV2FromPath(path string) (uint64, error) { return limit, nil } +// walkCgroupV2Hierarchy walks up the cgroup v2 hierarchy to find the most restrictive memory limit. +func walkCgroupV2Hierarchy(cgroupPath, mountPoint string) (uint64, error) { + var ( + found = false + minLimit uint64 = math.MaxUint64 + currentPath = cgroupPath + ) + for { + limit, err := readMemoryLimitV2FromPath(filepath.Join(currentPath, "memory.max")) + if err != nil && !errors.Is(err, ErrNoLimit) { + return 0, err + } else if err == nil { + found = true + minLimit = min(minLimit, limit) + } + + if currentPath == mountPoint { + break + } + + parent := filepath.Dir(currentPath) + if parent == currentPath { + break + } + currentPath = parent + } + if !found { + return 0, ErrNoLimit + } + + return minLimit, nil +} + // getMemoryLimitV1 retrieves the memory limit from the cgroup v1 controller. func getMemoryLimitV1(chs []cgroupHierarchy, mis []mountInfo) (uint64, error) { // find the cgroup v1 path for the memory controller. 
diff --git a/vendor/github.com/antithesishq/antithesis-sdk-go/assert/assert.go b/vendor/github.com/antithesishq/antithesis-sdk-go/assert/assert.go index 033fadfb2a6..3ede0101e47 100644 --- a/vendor/github.com/antithesishq/antithesis-sdk-go/assert/assert.go +++ b/vendor/github.com/antithesishq/antithesis-sdk-go/assert/assert.go @@ -17,13 +17,18 @@ // [test properties]: https://antithesis.com/docs/using_antithesis/properties/ // [workload]: https://antithesis.com/docs/getting_started/first_test/ // [antithesis-go-generator]: https://antithesis.com/docs/using_antithesis/sdk/go/instrumentor/ -// [triage report]: https://antithesis.com/docs/reports/triage/ +// [triage report]: https://antithesis.com/docs/reports/ // [here]: https://antithesis.com/docs/using_antithesis/sdk/fallback/ // [Sometimes assertions]: https://antithesis.com/docs/best_practices/sometimes_assertions/ // -// [details]: https://antithesis.com/docs/reports/triage/#details +// [details]: https://antithesis.com/docs/reports/ package assert +import ( + "encoding/json" + "fmt" +) + type assertInfo struct { Location *locationInfo `json:"location"` Details map[string]any `json:"details"` @@ -36,6 +41,64 @@ type assertInfo struct { Condition bool `json:"condition"` } +// Create a custom json marshaler for assertInfo so that we can force Errors to be marshaled with their error details. +// Without this, custom errors are marshaled as an empty object because the default json marshaler doesn't include the error +// (because it's a method - not an exported struct field). +func (f assertInfo) MarshalJSON() ([]byte, error) { + type alias assertInfo // prevent infinite recursion + a := alias(f) + if a.Details != nil { + a.Details = normalizeMap(a.Details) + } + return json.Marshal(a) +} + +type jsonError struct { + innerError error +} + +func (e jsonError) MarshalJSON() ([]byte, error) { + // Marshal this as the debug output string instead of e.Error(). 
These should be equivalent, but Sprintf correctly + // handles nil values for us (which otherwise are annoying to defend against due to this - https://go.dev/doc/faq#nil_error) + return json.Marshal(fmt.Sprintf("%+v", e.innerError)) +} + +// Recursively replace any `error` with jsonError while doing a deep copy. +// Most of the logic is in the normalize method below. This method exists to localize the type assertions +// and provide a function that takes in/out a map instead of any. +func normalizeMap(v map[string]any) map[string]any { + return normalize(v).(map[string]any) +} + +func normalize(input any) any { + // This switch will miss some cases (pointers, structs, non-any types), but should catch a very large proportion of real error interfaces + // in real details objects. We can augment this if we find other cases common enough to support. + switch inputTyped := input.(type) { + case error: + // Check if the underlying error implements json.Marshaler, so that if the error + // already knows who to marshal itself, we don't override that. 
+ if _, ok := inputTyped.(json.Marshaler); ok { + return inputTyped + } else { + return jsonError{inputTyped} + } + case map[string]any: + out := make(map[string]any, len(inputTyped)) + for k, v := range inputTyped { + out[k] = normalize(v) + } + return out + case []any: + out := make([]any, len(inputTyped)) + for i := range inputTyped { + out[i] = normalize(inputTyped[i]) + } + return out + default: + return input + } +} + type wrappedAssertInfo struct { A *assertInfo `json:"antithesis_assert"` } diff --git a/vendor/github.com/antithesishq/antithesis-sdk-go/internal/emit.go b/vendor/github.com/antithesishq/antithesis-sdk-go/internal/emit.go index 97967c2a9db..a932f5cf4a4 100644 --- a/vendor/github.com/antithesishq/antithesis-sdk-go/internal/emit.go +++ b/vendor/github.com/antithesishq/antithesis-sdk-go/internal/emit.go @@ -4,67 +4,11 @@ package internal import ( "encoding/json" - "fmt" "log" "math/rand" "os" - "unsafe" ) -// -------------------------------------------------------------------------------- -// To build and run an executable with this package -// -// CC=clang CGO_ENABLED=1 go run ./main.go -// -------------------------------------------------------------------------------- - -// \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/ -// -// The commented lines below, and the `import "C"` line which must directly follow -// the commented lines are used by CGO. They are load-bearing, and should not be -// changed without first understanding how CGO uses them. 
-// -// \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/ - -// #cgo LDFLAGS: -ldl -// -// #include -// #include -// #include -// #include -// -// typedef void (*go_fuzz_json_data_fn)(const char *data, size_t size); -// void -// go_fuzz_json_data(void *f, const char *data, size_t size) { -// ((go_fuzz_json_data_fn)f)(data, size); -// } -// -// typedef void (*go_fuzz_flush_fn)(void); -// void -// go_fuzz_flush(void *f) { -// ((go_fuzz_flush_fn)f)(); -// } -// -// typedef uint64_t (*go_fuzz_get_random_fn)(void); -// uint64_t -// go_fuzz_get_random(void *f) { -// return ((go_fuzz_get_random_fn)f)(); -// } -// -// typedef bool (*go_notify_coverage_fn)(size_t); -// int -// go_notify_coverage(void *f, size_t edges) { -// bool b = ((go_notify_coverage_fn)f)(edges); -// return b ? 1 : 0; -// } -// -// typedef uint64_t (*go_init_coverage_fn)(size_t num_edges, const char *symbols); -// uint64_t -// go_init_coverage(void *f, size_t num_edges, const char *symbols) { -// return ((go_init_coverage_fn)f)(num_edges, symbols); -// } -// -import "C" - func Json_data(v any) error { if data, err := json.Marshal(v); err != nil { return err @@ -95,45 +39,10 @@ type libHandler interface { const ( errorLogLinePrefix = "[* antithesis-sdk-go *]" - defaultNativeLibraryPath = "/usr/lib/libvoidstar.so" ) var handler libHandler -type voidstarHandler struct { - fuzzJsonData unsafe.Pointer - fuzzFlush unsafe.Pointer - fuzzGetRandom unsafe.Pointer - initCoverage unsafe.Pointer - notifyCoverage unsafe.Pointer -} - -func (h *voidstarHandler) output(message string) { - msg_len := len(message) - if msg_len == 0 { - return - } - cstrMessage := C.CString(message) - defer C.free(unsafe.Pointer(cstrMessage)) - C.go_fuzz_json_data(h.fuzzJsonData, cstrMessage, C.ulong(msg_len)) - C.go_fuzz_flush(h.fuzzFlush) -} - -func (h *voidstarHandler) random() uint64 { - return uint64(C.go_fuzz_get_random(h.fuzzGetRandom)) -} - -func (h *voidstarHandler) init_coverage(num_edge uint64, 
symbols string) uint64 { - cstrSymbols := C.CString(symbols) - defer C.free(unsafe.Pointer(cstrSymbols)) - return uint64(C.go_init_coverage(h.initCoverage, C.ulong(num_edge), cstrSymbols)) -} - -func (h *voidstarHandler) notify(edge uint64) bool { - ival := int(C.go_notify_coverage(h.notifyCoverage, C.ulong(edge))) - return ival == 1 -} - type localHandler struct { outputFile *os.File // can be nil } @@ -160,63 +69,12 @@ func (h *localHandler) init_coverage(num_edges uint64, symbols string) uint64 { return 0 } -// If we have a file at `defaultNativeLibraryPath`, we load the shared library -// (and panic on any error encountered during load). -// Otherwise fallback to the local handler. func init() { - if _, err := os.Stat(defaultNativeLibraryPath); err == nil { - if handler, err = openSharedLib(defaultNativeLibraryPath); err != nil { - panic(err) - } - return + handler = init_in_antithesis() + if handler == nil { + // Otherwise fallback to the local handler. + handler = openLocalHandler() } - handler = openLocalHandler() -} - -// Attempt to load libvoidstar and some symbols from `path` -func openSharedLib(path string) (*voidstarHandler, error) { - cstrPath := C.CString(path) - defer C.free(unsafe.Pointer(cstrPath)) - - dlError := func(message string) error { - return fmt.Errorf("%s: (%s)", message, C.GoString(C.dlerror())) - } - - sharedLib := C.dlopen(cstrPath, C.int(C.RTLD_NOW)) - if sharedLib == nil { - return nil, dlError("Can not load the Antithesis native library") - } - - loadFunc := func(name string) (symbol unsafe.Pointer, err error) { - cstrName := C.CString(name) - defer C.free(unsafe.Pointer(cstrName)) - if symbol = C.dlsym(sharedLib, cstrName); symbol == nil { - err = dlError(fmt.Sprintf("Can not access symbol %s", name)) - } - return - } - - fuzzJsonData, err := loadFunc("fuzz_json_data") - if err != nil { - return nil, err - } - fuzzFlush, err := loadFunc("fuzz_flush") - if err != nil { - return nil, err - } - fuzzGetRandom, err := 
loadFunc("fuzz_get_random") - if err != nil { - return nil, err - } - notifyCoverage, err := loadFunc("notify_coverage") - if err != nil { - return nil, err - } - initCoverage, err := loadFunc("init_coverage_module") - if err != nil { - return nil, err - } - return &voidstarHandler{fuzzJsonData, fuzzFlush, fuzzGetRandom, initCoverage, notifyCoverage}, nil } // If `localOutputEnvVar` is set to a non-empty path, attempt to open that path and truncate the file diff --git a/vendor/github.com/antithesishq/antithesis-sdk-go/internal/sdk_const.go b/vendor/github.com/antithesishq/antithesis-sdk-go/internal/sdk_const.go index 939582573e5..a4db4e7e1ef 100644 --- a/vendor/github.com/antithesishq/antithesis-sdk-go/internal/sdk_const.go +++ b/vendor/github.com/antithesishq/antithesis-sdk-go/internal/sdk_const.go @@ -3,7 +3,7 @@ package internal // -------------------------------------------------------------------------------- // Versions // -------------------------------------------------------------------------------- -const SDK_Version = "0.4.3" +const SDK_Version = "0.5.0" const Protocol_Version = "1.1.0" // -------------------------------------------------------------------------------- diff --git a/vendor/github.com/antithesishq/antithesis-sdk-go/internal/voidstar_handler.go b/vendor/github.com/antithesishq/antithesis-sdk-go/internal/voidstar_handler.go new file mode 100644 index 00000000000..eb410fd887c --- /dev/null +++ b/vendor/github.com/antithesishq/antithesis-sdk-go/internal/voidstar_handler.go @@ -0,0 +1,160 @@ +//go:build enable_antithesis_sdk && linux && amd64 && cgo + +package internal + +import ( + "fmt" + "unsafe" + "os" +) + +// -------------------------------------------------------------------------------- +// To build and run an executable with this package +// +// CC=clang CGO_ENABLED=1 go run ./main.go +// -------------------------------------------------------------------------------- + +// 
\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/ +// +// The commented lines below, and the `import "C"` line which must directly follow +// the commented lines are used by CGO. They are load-bearing, and should not be +// changed without first understanding how CGO uses them. +// +// \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/ + +// #cgo LDFLAGS: -ldl +// +// #include +// #include +// #include +// #include +// +// typedef void (*go_fuzz_json_data_fn)(const char *data, size_t size); +// void +// go_fuzz_json_data(void *f, const char *data, size_t size) { +// ((go_fuzz_json_data_fn)f)(data, size); +// } +// +// typedef void (*go_fuzz_flush_fn)(void); +// void +// go_fuzz_flush(void *f) { +// ((go_fuzz_flush_fn)f)(); +// } +// +// typedef uint64_t (*go_fuzz_get_random_fn)(void); +// uint64_t +// go_fuzz_get_random(void *f) { +// return ((go_fuzz_get_random_fn)f)(); +// } +// +// typedef bool (*go_notify_coverage_fn)(size_t); +// int +// go_notify_coverage(void *f, size_t edges) { +// bool b = ((go_notify_coverage_fn)f)(edges); +// return b ? 
1 : 0; +// } +// +// typedef uint64_t (*go_init_coverage_fn)(size_t num_edges, const char *symbols); +// uint64_t +// go_init_coverage(void *f, size_t num_edges, const char *symbols) { +// return ((go_init_coverage_fn)f)(num_edges, symbols); +// } +// +import "C" + +const ( + defaultNativeLibraryPath = "/usr/lib/libvoidstar.so" +) + +type voidstarHandler struct { + fuzzJsonData unsafe.Pointer + fuzzFlush unsafe.Pointer + fuzzGetRandom unsafe.Pointer + initCoverage unsafe.Pointer + notifyCoverage unsafe.Pointer +} + +func (h *voidstarHandler) output(message string) { + msg_len := len(message) + if msg_len == 0 { + return + } + cstrMessage := C.CString(message) + defer C.free(unsafe.Pointer(cstrMessage)) + C.go_fuzz_json_data(h.fuzzJsonData, cstrMessage, C.ulong(msg_len)) + C.go_fuzz_flush(h.fuzzFlush) +} + +func (h *voidstarHandler) random() uint64 { + return uint64(C.go_fuzz_get_random(h.fuzzGetRandom)) +} + +func (h *voidstarHandler) init_coverage(num_edge uint64, symbols string) uint64 { + cstrSymbols := C.CString(symbols) + defer C.free(unsafe.Pointer(cstrSymbols)) + return uint64(C.go_init_coverage(h.initCoverage, C.ulong(num_edge), cstrSymbols)) +} + +func (h *voidstarHandler) notify(edge uint64) bool { + ival := int(C.go_notify_coverage(h.notifyCoverage, C.ulong(edge))) + return ival == 1 +} + +// Attempt to load libvoidstar and some symbols from `path` +func openSharedLib(path string) (*voidstarHandler, error) { + cstrPath := C.CString(path) + defer C.free(unsafe.Pointer(cstrPath)) + + dlError := func(message string) error { + return fmt.Errorf("%s: (%s)", message, C.GoString(C.dlerror())) + } + + sharedLib := C.dlopen(cstrPath, C.int(C.RTLD_NOW)) + if sharedLib == nil { + return nil, dlError("Can not load the Antithesis native library") + } + + loadFunc := func(name string) (symbol unsafe.Pointer, err error) { + cstrName := C.CString(name) + defer C.free(unsafe.Pointer(cstrName)) + if symbol = C.dlsym(sharedLib, cstrName); symbol == nil { + err = 
dlError(fmt.Sprintf("Can not access symbol %s", name)) + } + return + } + + fuzzJsonData, err := loadFunc("fuzz_json_data") + if err != nil { + return nil, err + } + fuzzFlush, err := loadFunc("fuzz_flush") + if err != nil { + return nil, err + } + fuzzGetRandom, err := loadFunc("fuzz_get_random") + if err != nil { + return nil, err + } + notifyCoverage, err := loadFunc("notify_coverage") + if err != nil { + return nil, err + } + initCoverage, err := loadFunc("init_coverage_module") + if err != nil { + return nil, err + } + return &voidstarHandler{fuzzJsonData, fuzzFlush, fuzzGetRandom, initCoverage, notifyCoverage}, nil +} + +// If we have a file at `defaultNativeLibraryPath`, we load the shared library +// (and panic on any error encountered during load). +func init_in_antithesis() libHandler { + if _, err := os.Stat(defaultNativeLibraryPath); err == nil { + handler, err := openSharedLib(defaultNativeLibraryPath) + if err != nil { + panic(err) + } + return handler + } + return nil +} diff --git a/vendor/github.com/antithesishq/antithesis-sdk-go/internal/voidstar_handler_noop.go b/vendor/github.com/antithesishq/antithesis-sdk-go/internal/voidstar_handler_noop.go new file mode 100644 index 00000000000..75c67a16593 --- /dev/null +++ b/vendor/github.com/antithesishq/antithesis-sdk-go/internal/voidstar_handler_noop.go @@ -0,0 +1,7 @@ +//go:build enable_antithesis_sdk && (!linux || !amd64 || !cgo) + +package internal + +func init_in_antithesis() libHandler { + return nil +} diff --git a/vendor/github.com/ceph/go-ceph/cephfs/errors.go b/vendor/github.com/ceph/go-ceph/cephfs/errors.go index 7385ae6186a..1965ca4a87a 100644 --- a/vendor/github.com/ceph/go-ceph/cephfs/errors.go +++ b/vendor/github.com/ceph/go-ceph/cephfs/errors.go @@ -40,6 +40,8 @@ var ( // ErrOpNotSupported is returned in general for operations that are not // supported. ErrOpNotSupported = getError(-C.EOPNOTSUPP) + // ErrNotImplemented indicates a function is not implemented in by libcephfs. 
+ ErrNotImplemented = getError(-C.ENOSYS) // Private errors: diff --git a/vendor/github.com/ceph/go-ceph/cephfs/file_fd.go b/vendor/github.com/ceph/go-ceph/cephfs/file_fd.go index 1c8b44037bf..0e6b0e3e439 100644 --- a/vendor/github.com/ceph/go-ceph/cephfs/file_fd.go +++ b/vendor/github.com/ceph/go-ceph/cephfs/file_fd.go @@ -1,5 +1,3 @@ -//go:build ceph_preview - package cephfs // Fd returns the integer open file descriptor in cephfs. diff --git a/vendor/github.com/ceph/go-ceph/cephfs/file_futime.go b/vendor/github.com/ceph/go-ceph/cephfs/file_futime.go index 31382f56bd1..0dfcf70e205 100644 --- a/vendor/github.com/ceph/go-ceph/cephfs/file_futime.go +++ b/vendor/github.com/ceph/go-ceph/cephfs/file_futime.go @@ -1,5 +1,3 @@ -//go:build ceph_preview - package cephfs // Futime changes file/directory last access and modification times. diff --git a/vendor/github.com/ceph/go-ceph/cephfs/snap_diff.go b/vendor/github.com/ceph/go-ceph/cephfs/snap_diff.go new file mode 100644 index 00000000000..2d5553a567f --- /dev/null +++ b/vendor/github.com/ceph/go-ceph/cephfs/snap_diff.go @@ -0,0 +1,268 @@ +//go:build ceph_preview + +package cephfs + +/* +#cgo LDFLAGS: -lcephfs +#cgo CPPFLAGS: -D_FILE_OFFSET_BITS=64 +#include +#include +#include + +// Types and constants are copied from libcephfs.h with added "_" as prefix. This +// prevents redefinition of the types on libcephfs versions that have them +// already. + +typedef struct { + struct dirent dir_entry; + uint64_t snapid; +} _ceph_snapdiff_entry_t; + +typedef struct { + struct ceph_mount_info* cmount; + struct ceph_dir_result* dir1; + struct ceph_dir_result* dir_aux; +} _ceph_snapdiff_info; + +// open_snapdiff_fn matches the open_snapdiff function signature. 
+typedef int(*open_snapdiff_fn)(struct ceph_mount_info* cmount, + const char* root_path, + const char* rel_path, + const char* snap1, + const char* snap2, + _ceph_snapdiff_info* out); + +// open_snapdiff_dlsym take *fn as open_snapdiff_fn and calls the dynamically loaded +// open_snapdiff function passed as 1st argument. +static inline int open_snapdiff_dlsym(void *fn, + struct ceph_mount_info* cmount, + const char* root_path, + const char* rel_path, + const char* snap1, + const char* snap2, + _ceph_snapdiff_info* out) { + // cast function pointer fn to open_snapdiff and call the function + return ((open_snapdiff_fn) fn)(cmount, root_path, rel_path, snap1, snap2, out); +} + +// readdir_snapdiff_fn matches the readdir_snapdiff function signature. +typedef int(*readdir_snapdiff_fn)(_ceph_snapdiff_info* snapdiff, + _ceph_snapdiff_entry_t* out); + +// readdir_snapdiff_dlsym take *fn as readdir_snapdiff_fn and calls the dynamically loaded +// readdir_snapdiff function passed as 1st argument. +static inline int readdir_snapdiff_dlsym(void *fn, + _ceph_snapdiff_info* snapdiff, + _ceph_snapdiff_entry_t* out) { + // cast function pointer fn to readdir_snapdiff and call the function + return ((readdir_snapdiff_fn) fn)(snapdiff, out); +} + +// close_snapdiff_fn matches the close_snapdiff function signature. +typedef int(*close_snapdiff_fn)(_ceph_snapdiff_info* snapdiff); + +// close_snapdiff_dlsym take *fn as close_snapdiff_fn and calls the dynamically loaded +// close_snapdiff function passed as 1st argument. 
+static inline int close_snapdiff_dlsym(void *fn, + _ceph_snapdiff_info* snapdiff) { + // cast function pointer fn to close_snapdiff and call the function + return ((close_snapdiff_fn) fn)(snapdiff); +} +*/ +import "C" + +import ( + "fmt" + "sync" + "unsafe" + + "github.com/ceph/go-ceph/internal/dlsym" +) + +var ( + cephOpenSnapDiffOnce sync.Once + cephReaddirSnapDiffOnce sync.Once + cephCloseSnapDiffOnce sync.Once + cephOpenSnapDiff unsafe.Pointer + cephReaddirSnapDiff unsafe.Pointer + cephCloseSnapDiff unsafe.Pointer + cephOpenSnapDiffErr error + cephReaddirSnapDiffErr error + cephCloseSnapDiffErr error +) + +// SnapDiffInfo is a handle to a snapshot diff API. +type SnapDiffInfo struct { + cMount *MountInfo + dir1 *Directory + dirAux *Directory +} + +// SnapDiffEntry is a single entry in the snapshot diff. +// It contains a DirEntry and the ID of the snapshot to +// which the directory belongs. +type SnapDiffEntry struct { + DirEntry *DirEntry + SnapID uint64 +} + +// SnapDiffConfig is used to define the parameters of a open_snapdiff call. +// Snapshot Delta is generated between the passed snapshots snap1 and snap2. +// All fields must be specified before passing to OpenSnapDiff(). +type SnapDiffConfig struct { + // CMount is the ceph mount handle that will be used for snap.diff retrieval. + CMount *MountInfo + // RootPath represents the root path for snapshots-in-question. + RootPath string + // RelPath is the subpath under the root to build delta for. + RelPath string + // Snap1 is the first snapshot name. + Snap1 string + // Snap2 is the second snapshot name. + Snap2 string +} + +// OpenSnapDiff opens a snapshot diff stream to get snapshots delta +// and returns a SnapDiffInfo struct containing the diff information. 
+// +// Implements: +// +// int ceph_open_snapdiff(struct ceph_mount_info* cmount, +// const char* root_path, +// const char* rel_path, +// const char* snap1, +// const char* snap2, +// struct ceph_snapdiff_info* out); +func OpenSnapDiff(config SnapDiffConfig) (*SnapDiffInfo, error) { + if config.CMount == nil || config.RootPath == "" || config.RelPath == "" || + config.Snap1 == "" || config.Snap2 == "" { + return nil, errInvalid + } + + cephOpenSnapDiffOnce.Do(func() { + cephOpenSnapDiff, cephOpenSnapDiffErr = dlsym.LookupSymbol("ceph_open_snapdiff") + }) + + if cephOpenSnapDiffErr != nil { + return nil, fmt.Errorf("%w: %w", ErrNotImplemented, cephOpenSnapDiffErr) + } + + rawCephSnapDiffInfo := &C._ceph_snapdiff_info{} + + ret := C.open_snapdiff_dlsym( + cephOpenSnapDiff, + config.CMount.mount, + C.CString(config.RootPath), + C.CString(config.RelPath), + C.CString(config.Snap1), + C.CString(config.Snap2), + rawCephSnapDiffInfo) + + if ret != 0 { + return nil, getError(ret) + } + + mountInfo := &MountInfo{ + mount: rawCephSnapDiffInfo.cmount, + } + cephSnapDiffInfo := &SnapDiffInfo{ + cMount: mountInfo, + dir1: &Directory{ + mount: mountInfo, + dir: rawCephSnapDiffInfo.dir1, + }, + dirAux: &Directory{ + mount: mountInfo, + dir: rawCephSnapDiffInfo.dir_aux, + }, + } + + return cephSnapDiffInfo, nil +} + +// validate checks that the SnapDiffInfo struct is valid. +func (info *SnapDiffInfo) validate() error { + if info.cMount == nil || info.dir1 == nil || info.dirAux == nil { + return errInvalid + } + + return nil +} + +// Readdir returns the next entry in the snapshot diff stream. +// If there are no more entries, it returns (nil, nil). 
+// +// Implements: +// +// int ceph_readdir_snapdiff(struct ceph_snapdiff_info* snapdiff, +// struct ceph_snapdiff_entry_t* out); +func (info *SnapDiffInfo) Readdir() (*SnapDiffEntry, error) { + if err := info.validate(); err != nil { + return nil, err + } + + cephReaddirSnapDiffOnce.Do(func() { + cephReaddirSnapDiff, cephReaddirSnapDiffErr = dlsym.LookupSymbol("ceph_readdir_snapdiff") + }) + if cephReaddirSnapDiffErr != nil { + return nil, fmt.Errorf("%w: %w", ErrNotImplemented, cephReaddirSnapDiffErr) + } + + rawSnapDiffEntry := &C._ceph_snapdiff_entry_t{} + rawSnapDiffInfo := &C._ceph_snapdiff_info{ + cmount: info.cMount.mount, + dir1: info.dir1.dir, + dir_aux: info.dirAux.dir, + } + + ret := C.readdir_snapdiff_dlsym( + cephReaddirSnapDiff, + rawSnapDiffInfo, + rawSnapDiffEntry) + if ret < 0 { + return nil, getError(ret) + } + if ret == 0 { + // return 0 indicates there is not more entries to return. + return nil, nil + } + + snapDiffEntry := &SnapDiffEntry{ + DirEntry: toDirEntry(&rawSnapDiffEntry.dir_entry), + SnapID: uint64(rawSnapDiffEntry.snapid), + } + return snapDiffEntry, nil +} + +// Close closes the snapshot diff handle. 
+// +// Implements: +// +// int ceph_close_snapdiff(struct ceph_snapdiff_info* snapdiff); +func (info *SnapDiffInfo) Close() error { + if err := info.validate(); err != nil { + return err + } + + cephCloseSnapDiffOnce.Do(func() { + cephCloseSnapDiff, cephCloseSnapDiffErr = dlsym.LookupSymbol("ceph_close_snapdiff") + }) + if cephCloseSnapDiffErr != nil { + return fmt.Errorf("%w: %w", ErrNotImplemented, cephCloseSnapDiffErr) + } + + rawCephSnapDiffInfo := &C._ceph_snapdiff_info{ + cmount: info.cMount.mount, + dir1: info.dir1.dir, + dir_aux: info.dirAux.dir, + } + ret := C.close_snapdiff_dlsym( + cephCloseSnapDiff, + rawCephSnapDiffInfo) + + if ret != 0 { + return getError(ret) + } + + return nil +} diff --git a/vendor/github.com/ceph/go-ceph/internal/dlsym/dlsym.go b/vendor/github.com/ceph/go-ceph/internal/dlsym/dlsym.go new file mode 100644 index 00000000000..0c1ef1ef7bc --- /dev/null +++ b/vendor/github.com/ceph/go-ceph/internal/dlsym/dlsym.go @@ -0,0 +1,39 @@ +package dlsym + +// #cgo LDFLAGS: -ldl +// +// #define _GNU_SOURCE +// +// #include +// #include +import "C" + +import ( + "errors" + "fmt" + "unsafe" +) + +// ErrUndefinedSymbol is returned by LookupSymbol when the requested symbol +// could not be found. +var ErrUndefinedSymbol = errors.New("symbol not found") + +// LookupSymbol resolves the named symbol from the already dynamically loaded +// libraries. If the symbol is found, a pointer to it is returned, in case of a +// failure, the message provided by dlerror() is included in the error message. 
+func LookupSymbol(symbol string) (unsafe.Pointer, error) { + cSymName := C.CString(symbol) + defer C.free(unsafe.Pointer(cSymName)) + + // clear dlerror before looking up the symbol + C.dlerror() + // resolve the address of the symbol + sym := C.dlsym(C.RTLD_DEFAULT, cSymName) + e := C.dlerror() + dlerr := C.GoString(e) + if dlerr != "" { + return nil, fmt.Errorf("%w: %s", ErrUndefinedSymbol, dlerr) + } + + return sym, nil +} diff --git a/vendor/github.com/coreos/go-oidc/v3/oidc/oidc.go b/vendor/github.com/coreos/go-oidc/v3/oidc/oidc.go index f6a7ea8a580..2659518cc48 100644 --- a/vendor/github.com/coreos/go-oidc/v3/oidc/oidc.go +++ b/vendor/github.com/coreos/go-oidc/v3/oidc/oidc.go @@ -162,7 +162,7 @@ var supportedAlgorithms = map[string]bool{ // parsing. // // // Directly fetch the metadata document. -// resp, err := http.Get("https://login.example.com/custom-metadata-path") +// resp, err := http.Get("https://login.example.com/custom-metadata-path") // if err != nil { // // ... // } @@ -267,7 +267,7 @@ func NewProvider(ctx context.Context, issuer string) (*Provider, error) { issuerURL = issuer } if p.Issuer != issuerURL && !skipIssuerValidation { - return nil, fmt.Errorf("oidc: issuer did not match the issuer returned by provider, expected %q got %q", issuer, p.Issuer) + return nil, fmt.Errorf("oidc: issuer URL provided to client (%q) did not match the issuer URL returned by provider (%q)", issuer, p.Issuer) } var algs []string for _, a := range p.Algorithms { diff --git a/vendor/github.com/coreos/go-systemd/v22/journal/journal.go b/vendor/github.com/coreos/go-systemd/v22/journal/journal.go index ac24c7767d3..16c4e47751c 100644 --- a/vendor/github.com/coreos/go-systemd/v22/journal/journal.go +++ b/vendor/github.com/coreos/go-systemd/v22/journal/journal.go @@ -41,6 +41,6 @@ const ( ) // Print prints a message to the local systemd journal using Send(). 
-func Print(priority Priority, format string, a ...interface{}) error { +func Print(priority Priority, format string, a ...any) error { return Send(fmt.Sprintf(format, a...), priority, nil) } diff --git a/vendor/github.com/coreos/go-systemd/v22/journal/journal_unix.go b/vendor/github.com/coreos/go-systemd/v22/journal/journal_unix.go index c5b23a81968..6266e16e573 100644 --- a/vendor/github.com/coreos/go-systemd/v22/journal/journal_unix.go +++ b/vendor/github.com/coreos/go-systemd/v22/journal/journal_unix.go @@ -13,7 +13,6 @@ // limitations under the License. //go:build !windows -// +build !windows // Package journal provides write bindings to the local systemd journal. // It is implemented in pure Go and connects to the journal directly over its @@ -31,7 +30,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "net" "os" "strconv" @@ -194,7 +192,7 @@ func appendVariable(w io.Writer, name, value string) { * - the data, followed by a newline */ fmt.Fprintln(w, name) - binary.Write(w, binary.LittleEndian, uint64(len(value))) + _ = binary.Write(w, binary.LittleEndian, uint64(len(value))) fmt.Fprintln(w, value) } else { /* just write the variable and value all on one line */ @@ -214,7 +212,7 @@ func validVarName(name string) error { } for _, c := range name { - if !(('A' <= c && c <= 'Z') || ('0' <= c && c <= '9') || c == '_') { + if ('A' > c || c > 'Z') && ('0' > c || c > '9') && c != '_' { return errors.New("Variable name contains invalid characters") } } @@ -239,7 +237,7 @@ func isSocketSpaceError(err error) bool { // tempFd creates a temporary, unlinked file under `/dev/shm`. 
func tempFd() (*os.File, error) { - file, err := ioutil.TempFile("/dev/shm/", "journal.XXXXX") + file, err := os.CreateTemp("/dev/shm/", "journal.XXXXX") if err != nil { return nil, err } diff --git a/vendor/github.com/gabriel-vasile/mimetype/.git-blame-ignore-revs b/vendor/github.com/gabriel-vasile/mimetype/.git-blame-ignore-revs new file mode 100644 index 00000000000..3892826a749 --- /dev/null +++ b/vendor/github.com/gabriel-vasile/mimetype/.git-blame-ignore-revs @@ -0,0 +1,9 @@ +# Github is obeying this ignore file by default. +# Run this command on local to ignore formatting commits in `git blame` +# git config blame.ignoreRevsFile .git-blame-ignore-revs + +# Added a new column to supported_mimes.md +# The supported_mimes.md file was a nice way to find when a file format was +# introduced. However, when I changed to add a new column in the table, the +# whole git blame got poisoned for the file. +eb497f9bc5d31c6eab2929a112051218670137ba diff --git a/vendor/github.com/gabriel-vasile/mimetype/.golangci.yml b/vendor/github.com/gabriel-vasile/mimetype/.golangci.yml index f2058ccc573..5b30cd614d8 100644 --- a/vendor/github.com/gabriel-vasile/mimetype/.golangci.yml +++ b/vendor/github.com/gabriel-vasile/mimetype/.golangci.yml @@ -1,5 +1,41 @@ version: "2" + +run: + timeout: 5m + linters: exclusions: presets: - std-error-handling + enable: + - gosec # Detects security problems. + # Keep all extras disabled for now to focus on the integer overflow problem. + # TODO: enable these and other good linters + - dogsled # Detects assignments with too many blank identifiers. + - errcheck + - errchkjson # Detects unsupported types passed to json encoding functions and reports if checks for the returned error can be omitted. + - exhaustive # Detects missing options in enum switch statements. + - gocyclo + - govet + - ineffassign + - makezero # Finds slice declarations with non-zero initial length. + - misspell # Detects commonly misspelled English words in comments. 
+ - nakedret # Detects uses of naked returns. + - prealloc # Detects slice declarations that could potentially be pre-allocated. + - predeclared # Detects code that shadows one of Go's predeclared identifiers. + - reassign # Detects reassigning a top-level variable in another package. + - staticcheck + - thelper # Detects test helpers without t.Helper(). + - tparallel # Detects inappropriate usage of t.Parallel(). + - unconvert # Detects unnecessary type conversions. + - unused + - usestdlibvars # Detects the possibility to use variables/constants from the Go standard library. + - usetesting # Reports uses of functions with replacement inside the testing package. + settings: + govet: + disable: + - stdversion + gosec: + excludes: + - G404 # Weak random number generator used in tests. + - G304 # File inclusion diff --git a/vendor/github.com/gabriel-vasile/mimetype/README.md b/vendor/github.com/gabriel-vasile/mimetype/README.md index f28f56c9bf6..9fe71ac9455 100644 --- a/vendor/github.com/gabriel-vasile/mimetype/README.md +++ b/vendor/github.com/gabriel-vasile/mimetype/README.md @@ -70,13 +70,13 @@ If increasing the limit does not help, please ## Tests In addition to unit tests, [mimetype_tests](https://github.com/gabriel-vasile/mimetype_tests) compares the -library with the [Unix file utility](https://en.wikipedia.org/wiki/File_(command)) +library with [libmagic](https://en.wikipedia.org/wiki/File_(command)) for around 50 000 sample files. Check the latest comparison results [here](https://github.com/gabriel-vasile/mimetype_tests/actions). ## Benchmarks -Benchmarks for each file format are performed when a PR is open. The results can -be seen on the [workflows page](https://github.com/gabriel-vasile/mimetype/actions/workflows/benchmark.yml). +Benchmarks are performed when a PR is open. The results can be seen on the +[workflows page](https://github.com/gabriel-vasile/mimetype/actions/workflows/benchmark.yml). 
Performance improvements are welcome but correctness is prioritized. ## Structure @@ -97,7 +97,9 @@ or from a [file](https://pkg.go.dev/github.com/gabriel-vasile/mimetype#DetectFil ## Contributing -Contributions are unexpected but welcome. When submitting a PR for detection of -a new file format, please make sure to add a record to the list of testcases -from [mimetype_test.go](mimetype_test.go). For complex files a record can be added -in the [testdata](testdata) directory. +Contributions are never expected but very much welcome. +[mimetype_tests](https://github.com/gabriel-vasile/mimetype_tests/actions/workflows/test.yml) +shows which file formats are most often misidentified and can help prioritise. +When submitting a PR for detection of a new file format, please make sure to +add a record to the list of testcases in [mimetype_test.go](mimetype_test.go). +For complex files a record can be added in the [testdata](testdata) directory. diff --git a/vendor/github.com/gabriel-vasile/mimetype/internal/charset/charset.go b/vendor/github.com/gabriel-vasile/mimetype/internal/charset/charset.go index 8c5a05e4d50..3373274ad9d 100644 --- a/vendor/github.com/gabriel-vasile/mimetype/internal/charset/charset.go +++ b/vendor/github.com/gabriel-vasile/mimetype/internal/charset/charset.go @@ -2,6 +2,7 @@ package charset import ( "bytes" + "strings" "unicode/utf8" "github.com/gabriel-vasile/mimetype/internal/markup" @@ -141,27 +142,25 @@ func FromXML(content []byte) string { return FromPlain(content) } func fromXML(s scan.Bytes) string { - xml := []byte(" 1 && + raw[0] == 'x' && binary.BigEndian.Uint16(raw)%31 == 0 + // Check that the file is not a regular text to avoid false positives. 
+ return zlib && !Text(raw, 0) +} diff --git a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/audio.go b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/audio.go index d17e32482c9..2b160711ff7 100644 --- a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/audio.go +++ b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/audio.go @@ -5,26 +5,50 @@ import ( "encoding/binary" ) -var ( - // Flac matches a Free Lossless Audio Codec file. - Flac = prefix([]byte("\x66\x4C\x61\x43\x00\x00\x00\x22")) - // Midi matches a Musical Instrument Digital Interface file. - Midi = prefix([]byte("\x4D\x54\x68\x64")) - // Ape matches a Monkey's Audio file. - Ape = prefix([]byte("\x4D\x41\x43\x20\x96\x0F\x00\x00\x34\x00\x00\x00\x18\x00\x00\x00\x90\xE3")) - // MusePack matches a Musepack file. - MusePack = prefix([]byte("MPCK")) - // Au matches a Sun Microsystems au file. - Au = prefix([]byte("\x2E\x73\x6E\x64")) - // Amr matches an Adaptive Multi-Rate file. - Amr = prefix([]byte("\x23\x21\x41\x4D\x52")) - // Voc matches a Creative Voice file. - Voc = prefix([]byte("Creative Voice File")) - // M3u matches a Playlist file. - M3u = prefix([]byte("#EXTM3U")) - // AAC matches an Advanced Audio Coding file. - AAC = prefix([]byte{0xFF, 0xF1}, []byte{0xFF, 0xF9}) -) +// Flac matches a Free Lossless Audio Codec file. +func Flac(raw []byte, _ uint32) bool { + return bytes.HasPrefix(raw, []byte("\x66\x4C\x61\x43\x00\x00\x00\x22")) +} + +// Midi matches a Musical Instrument Digital Interface file. +func Midi(raw []byte, _ uint32) bool { + return bytes.HasPrefix(raw, []byte("\x4D\x54\x68\x64")) +} + +// Ape matches a Monkey's Audio file. +func Ape(raw []byte, _ uint32) bool { + return bytes.HasPrefix(raw, []byte("\x4D\x41\x43\x20\x96\x0F\x00\x00\x34\x00\x00\x00\x18\x00\x00\x00\x90\xE3")) +} + +// MusePack matches a Musepack file. +func MusePack(raw []byte, _ uint32) bool { + return bytes.HasPrefix(raw, []byte("MPCK")) +} + +// Au matches a Sun Microsystems au file. 
+func Au(raw []byte, _ uint32) bool { + return bytes.HasPrefix(raw, []byte("\x2E\x73\x6E\x64")) +} + +// Amr matches an Adaptive Multi-Rate file. +func Amr(raw []byte, _ uint32) bool { + return bytes.HasPrefix(raw, []byte("\x23\x21\x41\x4D\x52")) +} + +// Voc matches a Creative Voice file. +func Voc(raw []byte, _ uint32) bool { + return bytes.HasPrefix(raw, []byte("Creative Voice File")) +} + +// M3u matches a Playlist file. +func M3u(raw []byte, _ uint32) bool { + return bytes.HasPrefix(raw, []byte("#EXTM3U")) +} + +// AAC matches an Advanced Audio Coding file. +func AAC(raw []byte, _ uint32) bool { + return len(raw) > 1 && ((raw[0] == 0xFF && raw[1] == 0xF1) || (raw[0] == 0xFF && raw[1] == 0xF9)) +} // Mp3 matches an mp3 file. func Mp3(raw []byte, limit uint32) bool { diff --git a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/binary.go b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/binary.go index 70599b34209..37ad6a9fb14 100644 --- a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/binary.go +++ b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/binary.go @@ -6,26 +6,52 @@ import ( "encoding/binary" ) -var ( - // Lnk matches Microsoft lnk binary format. - Lnk = prefix([]byte{0x4C, 0x00, 0x00, 0x00, 0x01, 0x14, 0x02, 0x00}) - // Wasm matches a web assembly File Format file. - Wasm = prefix([]byte{0x00, 0x61, 0x73, 0x6D}) - // Exe matches a Windows/DOS executable file. - Exe = prefix([]byte{0x4D, 0x5A}) - // Elf matches an Executable and Linkable Format file. - Elf = prefix([]byte{0x7F, 0x45, 0x4C, 0x46}) - // Nes matches a Nintendo Entertainment system ROM file. - Nes = prefix([]byte{0x4E, 0x45, 0x53, 0x1A}) - // SWF matches an Adobe Flash swf file. - SWF = prefix([]byte("CWS"), []byte("FWS"), []byte("ZWS")) - // Torrent has bencoded text in the beginning. - Torrent = prefix([]byte("d8:announce")) - // PAR1 matches a parquet file. 
- Par1 = prefix([]byte{0x50, 0x41, 0x52, 0x31}) - // CBOR matches a Concise Binary Object Representation https://cbor.io/ - CBOR = prefix([]byte{0xD9, 0xD9, 0xF7}) -) +// Lnk matches Microsoft lnk binary format. +func Lnk(raw []byte, _ uint32) bool { + return bytes.HasPrefix(raw, []byte{0x4C, 0x00, 0x00, 0x00, 0x01, 0x14, 0x02, 0x00}) +} + +// Wasm matches a web assembly File Format file. +func Wasm(raw []byte, _ uint32) bool { + return bytes.HasPrefix(raw, []byte{0x00, 0x61, 0x73, 0x6D}) +} + +// Exe matches a Windows/DOS executable file. +func Exe(raw []byte, _ uint32) bool { + return len(raw) > 1 && raw[0] == 0x4D && raw[1] == 0x5A +} + +// Elf matches an Executable and Linkable Format file. +func Elf(raw []byte, _ uint32) bool { + return bytes.HasPrefix(raw, []byte{0x7F, 0x45, 0x4C, 0x46}) +} + +// Nes matches a Nintendo Entertainment system ROM file. +func Nes(raw []byte, _ uint32) bool { + return bytes.HasPrefix(raw, []byte{0x4E, 0x45, 0x53, 0x1A}) +} + +// SWF matches an Adobe Flash swf file. +func SWF(raw []byte, _ uint32) bool { + return bytes.HasPrefix(raw, []byte("CWS")) || + bytes.HasPrefix(raw, []byte("FWS")) || + bytes.HasPrefix(raw, []byte("ZWS")) +} + +// Torrent has bencoded text in the beginning. +func Torrent(raw []byte, _ uint32) bool { + return bytes.HasPrefix(raw, []byte("d8:announce")) +} + +// PAR1 matches a parquet file. +func Par1(raw []byte, _ uint32) bool { + return bytes.HasPrefix(raw, []byte{0x50, 0x41, 0x52, 0x31}) +} + +// CBOR matches a Concise Binary Object Representation https://cbor.io/ +func CBOR(raw []byte, _ uint32) bool { + return bytes.HasPrefix(raw, []byte{0xD9, 0xD9, 0xF7}) +} // Java bytecode and Mach-O binaries share the same magic number. 
// More info here https://github.com/threatstack/libmagic/blob/master/magic/Magdir/cafebabe @@ -168,8 +194,10 @@ func Marc(raw []byte, limit uint32) bool { // // [glTF specification]: https://registry.khronos.org/glTF/specs/2.0/glTF-2.0.html // [IANA glTF entry]: https://www.iana.org/assignments/media-types/model/gltf-binary -var GLB = prefix([]byte("\x67\x6C\x54\x46\x02\x00\x00\x00"), - []byte("\x67\x6C\x54\x46\x01\x00\x00\x00")) +func GLB(raw []byte, _ uint32) bool { + return bytes.HasPrefix(raw, []byte("\x67\x6C\x54\x46\x02\x00\x00\x00")) || + bytes.HasPrefix(raw, []byte("\x67\x6C\x54\x46\x01\x00\x00\x00")) +} // TzIf matches a Time Zone Information Format (TZif) file. // See more: https://tools.ietf.org/id/draft-murchison-tzdist-tzif-00.html#rfc.section.3 diff --git a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/database.go b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/database.go index cb1fed12f7a..35c3f911eaa 100644 --- a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/database.go +++ b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/database.go @@ -1,13 +1,21 @@ package magic -var ( - // Sqlite matches an SQLite database file. - Sqlite = prefix([]byte{ +import "bytes" + +// Sqlite matches an SQLite database file. +func Sqlite(raw []byte, _ uint32) bool { + return bytes.HasPrefix(raw, []byte{ 0x53, 0x51, 0x4c, 0x69, 0x74, 0x65, 0x20, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x20, 0x33, 0x00, }) - // MsAccessAce matches Microsoft Access dababase file. - MsAccessAce = offset([]byte("Standard ACE DB"), 4) - // MsAccessMdb matches legacy Microsoft Access database file (JET, 2003 and earlier). - MsAccessMdb = offset([]byte("Standard Jet DB"), 4) -) +} + +// MsAccessAce matches Microsoft Access dababase file. +func MsAccessAce(raw []byte, _ uint32) bool { + return offset(raw, []byte("Standard ACE DB"), 4) +} + +// MsAccessMdb matches legacy Microsoft Access database file (JET, 2003 and earlier). 
+func MsAccessMdb(raw []byte, _ uint32) bool { + return offset(raw, []byte("Standard Jet DB"), 4) +} diff --git a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/document.go b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/document.go index 7f9308db3b7..72080395807 100644 --- a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/document.go +++ b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/document.go @@ -5,14 +5,31 @@ import ( "encoding/binary" ) -var ( - // Fdf matches a Forms Data Format file. - Fdf = prefix([]byte("%FDF")) - // Mobi matches a Mobi file. - Mobi = offset([]byte("BOOKMOBI"), 60) - // Lit matches a Microsoft Lit file. - Lit = prefix([]byte("ITOLITLS")) -) +// Pdf matches a Portable Document Format file. +// https://github.com/file/file/blob/11010cc805546a3e35597e67e1129a481aed40e8/magic/Magdir/pdf +func Pdf(raw []byte, _ uint32) bool { + // usual pdf signature + return bytes.HasPrefix(raw, []byte("%PDF-")) || + // new-line prefixed signature + bytes.HasPrefix(raw, []byte("\012%PDF-")) || + // UTF-8 BOM prefixed signature + bytes.HasPrefix(raw, []byte("\xef\xbb\xbf%PDF-")) +} + +// Fdf matches a Forms Data Format file. +func Fdf(raw []byte, _ uint32) bool { + return bytes.HasPrefix(raw, []byte("%FDF")) +} + +// Mobi matches a Mobi file. +func Mobi(raw []byte, _ uint32) bool { + return offset(raw, []byte("BOOKMOBI"), 60) +} + +// Lit matches a Microsoft Lit file. +func Lit(raw []byte, _ uint32) bool { + return bytes.HasPrefix(raw, []byte("ITOLITLS")) +} // PDF matches a Portable Document Format file. 
// The %PDF- header should be the first thing inside the file but many diff --git a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/font.go b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/font.go index 43af28212e7..857cfcc552c 100644 --- a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/font.go +++ b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/font.go @@ -4,14 +4,20 @@ import ( "bytes" ) -var ( - // Woff matches a Web Open Font Format file. - Woff = prefix([]byte("wOFF")) - // Woff2 matches a Web Open Font Format version 2 file. - Woff2 = prefix([]byte("wOF2")) - // Otf matches an OpenType font file. - Otf = prefix([]byte{0x4F, 0x54, 0x54, 0x4F, 0x00}) -) +// Woff matches a Web Open Font Format file. +func Woff(raw []byte, _ uint32) bool { + return bytes.HasPrefix(raw, []byte("wOFF")) +} + +// Woff2 matches a Web Open Font Format version 2 file. +func Woff2(raw []byte, _ uint32) bool { + return bytes.HasPrefix(raw, []byte("wOF2")) +} + +// Otf matches an OpenType font file. +func Otf(raw []byte, _ uint32) bool { + return bytes.HasPrefix(raw, []byte{0x4F, 0x54, 0x54, 0x4F, 0x00}) +} // Ttf matches a TrueType font file. func Ttf(raw []byte, limit uint32) bool { diff --git a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/ftyp.go b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/ftyp.go index ac727139ef2..fc642ef433c 100644 --- a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/ftyp.go +++ b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/ftyp.go @@ -4,24 +4,33 @@ import ( "bytes" ) -var ( - // AVIF matches an AV1 Image File Format still or animated. - // Wikipedia page seems outdated listing image/avif-sequence for animations. - // https://github.com/AOMediaCodec/av1-avif/issues/59 - AVIF = ftyp([]byte("avif"), []byte("avis")) - // ThreeGP matches a 3GPP file. - ThreeGP = ftyp( +// AVIF matches an AV1 Image File Format still or animated. 
+// Wikipedia page seems outdated listing image/avif-sequence for animations. +// https://github.com/AOMediaCodec/av1-avif/issues/59 +func AVIF(raw []byte, _ uint32) bool { + return ftyp(raw, []byte("avif"), []byte("avis")) +} + +// ThreeGP matches a 3GPP file. +func ThreeGP(raw []byte, _ uint32) bool { + return ftyp(raw, []byte("3gp1"), []byte("3gp2"), []byte("3gp3"), []byte("3gp4"), []byte("3gp5"), []byte("3gp6"), []byte("3gp7"), []byte("3gs7"), []byte("3ge6"), []byte("3ge7"), []byte("3gg6"), ) - // ThreeG2 matches a 3GPP2 file. - ThreeG2 = ftyp( +} + +// ThreeG2 matches a 3GPP2 file. +func ThreeG2(raw []byte, _ uint32) bool { + return ftyp(raw, []byte("3g24"), []byte("3g25"), []byte("3g26"), []byte("3g2a"), []byte("3g2b"), []byte("3g2c"), []byte("KDDI"), ) - // AMp4 matches an audio MP4 file. - AMp4 = ftyp( +} + +// AMp4 matches an audio MP4 file. +func AMp4(raw []byte, _ uint32) bool { + return ftyp(raw, // audio for Adobe Flash Player 9+ []byte("F4A "), []byte("F4B "), // Apple iTunes AAC-LC (.M4A) Audio @@ -31,33 +40,61 @@ var ( // Nero Digital AAC Audio []byte("NDAS"), ) - // Mqv matches a Sony / Mobile QuickTime file. - Mqv = ftyp([]byte("mqt ")) - // M4a matches an audio M4A file. - M4a = ftyp([]byte("M4A ")) - // M4v matches an Appl4 M4V video file. - M4v = ftyp([]byte("M4V "), []byte("M4VH"), []byte("M4VP")) - // Heic matches a High Efficiency Image Coding (HEIC) file. - Heic = ftyp([]byte("heic"), []byte("heix")) - // HeicSequence matches a High Efficiency Image Coding (HEIC) file sequence. - HeicSequence = ftyp([]byte("hevc"), []byte("hevx")) - // Heif matches a High Efficiency Image File Format (HEIF) file. - Heif = ftyp([]byte("mif1"), []byte("heim"), []byte("heis"), []byte("avic")) - // HeifSequence matches a High Efficiency Image File Format (HEIF) file sequence. - HeifSequence = ftyp([]byte("msf1"), []byte("hevm"), []byte("hevs"), []byte("avcs")) - // Mj2 matches a Motion JPEG 2000 file: https://en.wikipedia.org/wiki/Motion_JPEG_2000. 
- Mj2 = ftyp([]byte("mj2s"), []byte("mjp2"), []byte("MFSM"), []byte("MGSV")) - // Dvb matches a Digital Video Broadcasting file: https://dvb.org. - // https://cconcolato.github.io/mp4ra/filetype.html - // https://github.com/file/file/blob/512840337ead1076519332d24fefcaa8fac36e06/magic/Magdir/animation#L135-L154 - Dvb = ftyp( +} + +// Mqv matches a Sony / Mobile QuickTime file. +func Mqv(raw []byte, _ uint32) bool { + return ftyp(raw, []byte("mqt ")) +} + +// M4a matches an audio M4A file. +func M4a(raw []byte, _ uint32) bool { + return ftyp(raw, []byte("M4A ")) +} + +// M4v matches an Appl4 M4V video file. +func M4v(raw []byte, _ uint32) bool { + return ftyp(raw, []byte("M4V "), []byte("M4VH"), []byte("M4VP")) +} + +// Heic matches a High Efficiency Image Coding (HEIC) file. +func Heic(raw []byte, _ uint32) bool { + return ftyp(raw, []byte("heic"), []byte("heix")) +} + +// HeicSequence matches a High Efficiency Image Coding (HEIC) file sequence. +func HeicSequence(raw []byte, _ uint32) bool { + return ftyp(raw, []byte("hevc"), []byte("hevx")) +} + +// Heif matches a High Efficiency Image File Format (HEIF) file. +func Heif(raw []byte, _ uint32) bool { + return ftyp(raw, []byte("mif1"), []byte("heim"), []byte("heis"), []byte("avic")) +} + +// HeifSequence matches a High Efficiency Image File Format (HEIF) file sequence. +func HeifSequence(raw []byte, _ uint32) bool { + return ftyp(raw, []byte("msf1"), []byte("hevm"), []byte("hevs"), []byte("avcs")) +} + +// Mj2 matches a Motion JPEG 2000 file: https://en.wikipedia.org/wiki/Motion_JPEG_2000. +func Mj2(raw []byte, _ uint32) bool { + return ftyp(raw, []byte("mj2s"), []byte("mjp2"), []byte("MFSM"), []byte("MGSV")) +} + +// Dvb matches a Digital Video Broadcasting file: https://dvb.org. 
+// https://cconcolato.github.io/mp4ra/filetype.html +// https://github.com/file/file/blob/512840337ead1076519332d24fefcaa8fac36e06/magic/Magdir/animation#L135-L154 +func Dvb(raw []byte, _ uint32) bool { + return ftyp(raw, []byte("dby1"), []byte("dsms"), []byte("dts1"), []byte("dts2"), []byte("dts3"), []byte("dxo "), []byte("dmb1"), []byte("dmpf"), []byte("drc1"), []byte("dv1a"), []byte("dv1b"), []byte("dv2a"), []byte("dv2b"), []byte("dv3a"), []byte("dv3b"), []byte("dvr1"), []byte("dvt1"), []byte("emsg")) - // TODO: add support for remaining video formats at ftyps.com. -) +} + +// TODO: add support for remaining video formats at ftyps.com. // QuickTime matches a QuickTime File Format file. // https://www.loc.gov/preservation/digital/formats/fdd/fdd000052.shtml diff --git a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/image.go b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/image.go index 0eb7e95f375..788f5478b49 100644 --- a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/image.go +++ b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/image.go @@ -2,66 +2,127 @@ package magic import "bytes" -var ( - // Png matches a Portable Network Graphics file. - // https://www.w3.org/TR/PNG/ - Png = prefix([]byte{0x89, 0x50, 0x4E, 0x47, 0x0D, 0x0A, 0x1A, 0x0A}) - // Apng matches an Animated Portable Network Graphics file. - // https://wiki.mozilla.org/APNG_Specification - Apng = offset([]byte("acTL"), 37) - // Jpg matches a Joint Photographic Experts Group file. - Jpg = prefix([]byte{0xFF, 0xD8, 0xFF}) - // Jp2 matches a JPEG 2000 Image file (ISO 15444-1). - Jp2 = jpeg2k([]byte{0x6a, 0x70, 0x32, 0x20}) - // Jpx matches a JPEG 2000 Image file (ISO 15444-2). - Jpx = jpeg2k([]byte{0x6a, 0x70, 0x78, 0x20}) - // Jpm matches a JPEG 2000 Image file (ISO 15444-6). - Jpm = jpeg2k([]byte{0x6a, 0x70, 0x6D, 0x20}) - // Gif matches a Graphics Interchange Format file. 
- Gif = prefix([]byte("GIF87a"), []byte("GIF89a")) - // Bmp matches a bitmap image file. - Bmp = prefix([]byte{0x42, 0x4D}) - // Ps matches a PostScript file. - Ps = prefix([]byte("%!PS-Adobe-")) - // Psd matches a Photoshop Document file. - Psd = prefix([]byte("8BPS")) - // Ico matches an ICO file. - Ico = prefix([]byte{0x00, 0x00, 0x01, 0x00}, []byte{0x00, 0x00, 0x02, 0x00}) - // Icns matches an ICNS (Apple Icon Image format) file. - Icns = prefix([]byte("icns")) - // Tiff matches a Tagged Image File Format file. - Tiff = prefix([]byte{0x49, 0x49, 0x2A, 0x00}, []byte{0x4D, 0x4D, 0x00, 0x2A}) - // Bpg matches a Better Portable Graphics file. - Bpg = prefix([]byte{0x42, 0x50, 0x47, 0xFB}) - // Xcf matches GIMP image data. - Xcf = prefix([]byte("gimp xcf")) - // Pat matches GIMP pattern data. - Pat = offset([]byte("GPAT"), 20) - // Gbr matches GIMP brush data. - Gbr = offset([]byte("GIMP"), 20) - // Hdr matches Radiance HDR image. - // https://web.archive.org/web/20060913152809/http://local.wasp.uwa.edu.au/~pbourke/dataformats/pic/ - Hdr = prefix([]byte("#?RADIANCE\n")) - // Xpm matches X PixMap image data. - Xpm = prefix([]byte{0x2F, 0x2A, 0x20, 0x58, 0x50, 0x4D, 0x20, 0x2A, 0x2F}) - // Jxs matches a JPEG XS coded image file (ISO/IEC 21122-3). - Jxs = prefix([]byte{0x00, 0x00, 0x00, 0x0C, 0x4A, 0x58, 0x53, 0x20, 0x0D, 0x0A, 0x87, 0x0A}) - // Jxr matches Microsoft HD JXR photo file. - Jxr = prefix([]byte{0x49, 0x49, 0xBC, 0x01}) -) +// Png matches a Portable Network Graphics file. +// https://www.w3.org/TR/PNG/ +func Png(raw []byte, _ uint32) bool { + return bytes.HasPrefix(raw, []byte{0x89, 0x50, 0x4E, 0x47, 0x0D, 0x0A, 0x1A, 0x0A}) +} -func jpeg2k(sig []byte) Detector { - return func(raw []byte, _ uint32) bool { - if len(raw) < 24 { - return false - } +// Apng matches an Animated Portable Network Graphics file. 
+// https://wiki.mozilla.org/APNG_Specification +func Apng(raw []byte, _ uint32) bool { + return offset(raw, []byte("acTL"), 37) +} - if !bytes.Equal(raw[4:8], []byte{0x6A, 0x50, 0x20, 0x20}) && - !bytes.Equal(raw[4:8], []byte{0x6A, 0x50, 0x32, 0x20}) { - return false - } - return bytes.Equal(raw[20:24], sig) +// Jpg matches a Joint Photographic Experts Group file. +func Jpg(raw []byte, _ uint32) bool { + return bytes.HasPrefix(raw, []byte{0xFF, 0xD8, 0xFF}) +} + +// Jp2 matches a JPEG 2000 Image file (ISO 15444-1). +func Jp2(raw []byte, _ uint32) bool { + return jpeg2k(raw, []byte{0x6a, 0x70, 0x32, 0x20}) +} + +// Jpx matches a JPEG 2000 Image file (ISO 15444-2). +func Jpx(raw []byte, _ uint32) bool { + return jpeg2k(raw, []byte{0x6a, 0x70, 0x78, 0x20}) +} + +// Jpm matches a JPEG 2000 Image file (ISO 15444-6). +func Jpm(raw []byte, _ uint32) bool { + return jpeg2k(raw, []byte{0x6a, 0x70, 0x6D, 0x20}) +} + +// Gif matches a Graphics Interchange Format file. +func Gif(raw []byte, _ uint32) bool { + return bytes.HasPrefix(raw, []byte("GIF87a")) || + bytes.HasPrefix(raw, []byte("GIF89a")) +} + +// Bmp matches a bitmap image file. +func Bmp(raw []byte, _ uint32) bool { + return bytes.HasPrefix(raw, []byte{0x42, 0x4D}) +} + +// Ps matches a PostScript file. +func Ps(raw []byte, _ uint32) bool { + return bytes.HasPrefix(raw, []byte("%!PS-Adobe-")) +} + +// Psd matches a Photoshop Document file. +func Psd(raw []byte, _ uint32) bool { + return bytes.HasPrefix(raw, []byte("8BPS")) +} + +// Ico matches an ICO file. +func Ico(raw []byte, _ uint32) bool { + return bytes.HasPrefix(raw, []byte{0x00, 0x00, 0x01, 0x00}) || + bytes.HasPrefix(raw, []byte{0x00, 0x00, 0x02, 0x00}) +} + +// Icns matches an ICNS (Apple Icon Image format) file. +func Icns(raw []byte, _ uint32) bool { + return bytes.HasPrefix(raw, []byte("icns")) +} + +// Tiff matches a Tagged Image File Format file. 
+func Tiff(raw []byte, _ uint32) bool { + return bytes.HasPrefix(raw, []byte{0x49, 0x49, 0x2A, 0x00}) || + bytes.HasPrefix(raw, []byte{0x4D, 0x4D, 0x00, 0x2A}) +} + +// Bpg matches a Better Portable Graphics file. +func Bpg(raw []byte, _ uint32) bool { + return bytes.HasPrefix(raw, []byte{0x42, 0x50, 0x47, 0xFB}) +} + +// Xcf matches GIMP image data. +func Xcf(raw []byte, _ uint32) bool { + return bytes.HasPrefix(raw, []byte("gimp xcf")) +} + +// Pat matches GIMP pattern data. +func Pat(raw []byte, _ uint32) bool { + return offset(raw, []byte("GPAT"), 20) +} + +// Gbr matches GIMP brush data. +func Gbr(raw []byte, _ uint32) bool { + return offset(raw, []byte("GIMP"), 20) +} + +// Hdr matches Radiance HDR image. +// https://web.archive.org/web/20060913152809/http://local.wasp.uwa.edu.au/~pbourke/dataformats/pic/ +func Hdr(raw []byte, _ uint32) bool { + return bytes.HasPrefix(raw, []byte("#?RADIANCE\n")) +} + +// Xpm matches X PixMap image data. +func Xpm(raw []byte, _ uint32) bool { + return bytes.HasPrefix(raw, []byte{0x2F, 0x2A, 0x20, 0x58, 0x50, 0x4D, 0x20, 0x2A, 0x2F}) +} + +// Jxs matches a JPEG XS coded image file (ISO/IEC 21122-3). +func Jxs(raw []byte, _ uint32) bool { + return bytes.HasPrefix(raw, []byte{0x00, 0x00, 0x00, 0x0C, 0x4A, 0x58, 0x53, 0x20, 0x0D, 0x0A, 0x87, 0x0A}) +} + +// Jxr matches Microsoft HD JXR photo file. +func Jxr(raw []byte, _ uint32) bool { + return bytes.HasPrefix(raw, []byte{0x49, 0x49, 0xBC, 0x01}) +} + +func jpeg2k(raw []byte, sig []byte) bool { + if len(raw) < 24 { + return false } + + if !bytes.Equal(raw[4:8], []byte{0x6A, 0x50, 0x20, 0x20}) && + !bytes.Equal(raw[4:8], []byte{0x6A, 0x50, 0x32, 0x20}) { + return false + } + return bytes.Equal(raw[20:24], sig) } // Webp matches a WebP file. @@ -108,3 +169,20 @@ func Jxl(raw []byte, _ uint32) bool { return bytes.HasPrefix(raw, []byte{0xFF, 0x0A}) || bytes.HasPrefix(raw, []byte("\x00\x00\x00\x0cJXL\x20\x0d\x0a\x87\x0a")) } + +// DXF matches Drawing Exchange Format AutoCAD file. 
+// There does not seem to be a clear specification and the files in the wild +// differ wildly. +// https://images.autodesk.com/adsk/files/autocad_2012_pdf_dxf-reference_enu.pdf +// +// I collected these signatures by downloading a few dozen files from +// http://cd.textfiles.com/amigaenv/DXF/OBJEKTE/ and +// https://sembiance.com/fileFormatSamples/poly/dxf/ and then +// xxd -l 16 {} | sort | uniq. +// These signatures are only for the ASCII version of DXF. There is a binary version too. +func DXF(raw []byte, _ uint32) bool { + return bytes.HasPrefix(raw, []byte(" 0\x0ASECTION\x0A")) || + bytes.HasPrefix(raw, []byte(" 0\x0D\x0ASECTION\x0D\x0A")) || + bytes.HasPrefix(raw, []byte("0\x0ASECTION\x0A")) || + bytes.HasPrefix(raw, []byte("0\x0D\x0ASECTION\x0D\x0A")) +} diff --git a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/magic.go b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/magic.go index 5fe435b99ff..6103c12d364 100644 --- a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/magic.go +++ b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/magic.go @@ -3,7 +3,6 @@ package magic import ( "bytes" - "fmt" "github.com/gabriel-vasile/mimetype/internal/scan" ) @@ -22,37 +21,20 @@ type ( } ) -// prefix creates a Detector which returns true if any of the provided signatures -// is the prefix of the raw input. -func prefix(sigs ...[]byte) Detector { - return func(raw []byte, limit uint32) bool { - for _, s := range sigs { - if bytes.HasPrefix(raw, s) { - return true - } - } - return false - } -} - -// offset creates a Detector which returns true if the provided signature can be +// offset returns true if the provided signature can be // found at offset in the raw input. 
-func offset(sig []byte, offset int) Detector { - return func(raw []byte, limit uint32) bool { - return len(raw) > offset && bytes.HasPrefix(raw[offset:], sig) - } +func offset(raw []byte, sig []byte, offset int) bool { + return len(raw) > offset && bytes.HasPrefix(raw[offset:], sig) } // ciPrefix is like prefix but the check is case insensitive. -func ciPrefix(sigs ...[]byte) Detector { - return func(raw []byte, limit uint32) bool { - for _, s := range sigs { - if ciCheck(s, raw) { - return true - } +func ciPrefix(raw []byte, sigs ...[]byte) bool { + for _, s := range sigs { + if ciCheck(s, raw) { + return true } - return false } + return false } func ciCheck(sig, raw []byte) bool { if len(raw) < len(sig)+1 { @@ -72,22 +54,18 @@ func ciCheck(sig, raw []byte) bool { return true } -// xml creates a Detector which returns true if any of the provided XML signatures -// matches the raw input. -func xml(sigs ...xmlSig) Detector { - return func(raw []byte, limit uint32) bool { - b := scan.Bytes(raw) - b.TrimLWS() - if len(b) == 0 { - return false - } - for _, s := range sigs { - if xmlCheck(s, b) { - return true - } - } +// xml returns true if any of the provided XML signatures matches the raw input. +func xml(b scan.Bytes, sigs ...xmlSig) bool { + b.TrimLWS() + if len(b) == 0 { return false } + for _, s := range sigs { + if xmlCheck(s, b) { + return true + } + } + return false } func xmlCheck(sig xmlSig, raw []byte) bool { raw = raw[:min(len(raw), 512)] @@ -103,28 +81,24 @@ func xmlCheck(sig xmlSig, raw []byte) bool { return localNameIndex != -1 && localNameIndex < bytes.Index(raw, sig.xmlns) } -// markup creates a Detector which returns true is any of the HTML signatures -// matches the raw input. -func markup(sigs ...[]byte) Detector { - return func(raw []byte, limit uint32) bool { - b := scan.Bytes(raw) - if bytes.HasPrefix(b, []byte{0xEF, 0xBB, 0xBF}) { - // We skip the UTF-8 BOM if present to ensure we correctly - // process any leading whitespace. 
The presence of the BOM - // is taken into account during charset detection in charset.go. - b.Advance(3) - } - b.TrimLWS() - if len(b) == 0 { - return false - } - for _, s := range sigs { - if markupCheck(s, b) { - return true - } - } +// markup returns true is any of the HTML signatures matches the raw input. +func markup(b scan.Bytes, sigs ...[]byte) bool { + if bytes.HasPrefix(b, []byte{0xEF, 0xBB, 0xBF}) { + // We skip the UTF-8 BOM if present to ensure we correctly + // process any leading whitespace. The presence of the BOM + // is taken into account during charset detection in charset.go. + b.Advance(3) + } + b.TrimLWS() + if len(b) == 0 { return false } + for _, s := range sigs { + if markupCheck(s, b) { + return true + } + } + return false } func markupCheck(sig, raw []byte) bool { if len(raw) < len(sig)+1 { @@ -149,29 +123,17 @@ func markupCheck(sig, raw []byte) bool { return true } -// ftyp creates a Detector which returns true if any of the FTYP signatures -// matches the raw input. -func ftyp(sigs ...[]byte) Detector { - return func(raw []byte, limit uint32) bool { - if len(raw) < 12 { - return false - } - for _, s := range sigs { - if bytes.Equal(raw[8:12], s) { - return true - } - } +// ftyp returns true if any of the FTYP signatures matches the raw input. +func ftyp(raw []byte, sigs ...[]byte) bool { + if len(raw) < 12 { return false } -} - -func newXMLSig(localName, xmlns string) xmlSig { - ret := xmlSig{xmlns: []byte(xmlns)} - if localName != "" { - ret.localName = []byte(fmt.Sprintf("<%s", localName)) + for _, s := range sigs { + if bytes.Equal(raw[8:12], s) { + return true + } } - - return ret + return false } // A valid shebang starts with the "#!" characters, @@ -184,29 +146,17 @@ func newXMLSig(localName, xmlns string) xmlSig { // #! /usr/bin/env php // // /usr/bin/env is the interpreter, php is the first and only argument. 
-func shebang(sigs ...[]byte) Detector { - return func(raw []byte, limit uint32) bool { - b := scan.Bytes(raw) - line := b.Line() - for _, s := range sigs { - if shebangCheck(s, line) { - return true - } +func shebang(b scan.Bytes, matchFlags scan.Flags, sigs ...[]byte) bool { + line := b.Line() + if len(line) < 2 || line[0] != '#' || line[1] != '!' { + return false + } + line = line[2:] + line.TrimLWS() + for _, s := range sigs { + if line.Match(s, matchFlags) != -1 { + return true } - return false } -} - -func shebangCheck(sig []byte, raw scan.Bytes) bool { - if len(raw) < len(sig)+2 { - return false - } - if raw[0] != '#' || raw[1] != '!' { - return false - } - - raw.Advance(2) // skip #! we checked above - raw.TrimLWS() - raw.TrimRWS() - return bytes.Equal(raw, sig) + return false } diff --git a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/meteo.go b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/meteo.go new file mode 100644 index 00000000000..da77d0b0ea0 --- /dev/null +++ b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/meteo.go @@ -0,0 +1,12 @@ +package magic + +import "bytes" + +// GRIB matches a GRIdded Binary meteorological file. +// https://www.nco.ncep.noaa.gov/pmb/docs/on388/ +// https://www.nco.ncep.noaa.gov/pmb/docs/grib2/grib2_doc/ +func GRIB(raw []byte, _ uint32) bool { + return len(raw) > 7 && + bytes.HasPrefix(raw, []byte("GRIB")) && + (raw[7] == 1 || raw[7] == 2) +} diff --git a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/ms_office.go b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/ms_office.go index c912823e928..e689e92a362 100644 --- a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/ms_office.go +++ b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/ms_office.go @@ -44,17 +44,6 @@ func Ole(raw []byte, limit uint32) bool { return bytes.HasPrefix(raw, []byte{0xD0, 0xCF, 0x11, 0xE0, 0xA1, 0xB1, 0x1A, 0xE1}) } -// Aaf matches an Advanced Authoring Format file. 
-// See: https://pyaaf.readthedocs.io/en/latest/about.html -// See: https://en.wikipedia.org/wiki/Advanced_Authoring_Format -func Aaf(raw []byte, limit uint32) bool { - if len(raw) < 31 { - return false - } - return bytes.HasPrefix(raw[8:], []byte{0x41, 0x41, 0x46, 0x42, 0x0D, 0x00, 0x4F, 0x4D}) && - (raw[30] == 0x09 || raw[30] == 0x0C) -} - // Doc matches a Microsoft Word 97-2003 file. // See: https://github.com/decalage2/oletools/blob/412ee36ae45e70f42123e835871bac956d958461/oletools/common/clsid.py func Doc(raw []byte, _ uint32) bool { @@ -203,9 +192,21 @@ func matchOleClsid(in []byte, clsid []byte) bool { // Expected offset of CLSID for root storage object. clsidOffset := sectorLength*(1+firstSecID) + 80 - if len(in) <= clsidOffset+16 { + // #731 offset is outside in or wrapped around due to integer overflow. + if len(in) <= clsidOffset+16 || clsidOffset < 0 { return false } return bytes.HasPrefix(in[clsidOffset:], clsid) } + +// WPD matches a WordPerfect document. +func WPD(raw []byte, _ uint32) bool { + if len(raw) < 10 { + return false + } + if !bytes.HasPrefix(raw, []byte("\xffWPC")) { + return false + } + return raw[8] == 1 && raw[9] == 10 +} diff --git a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/text.go b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/text.go index 1841ee871db..82f6c6702d4 100644 --- a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/text.go +++ b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/text.go @@ -10,9 +10,9 @@ import ( "github.com/gabriel-vasile/mimetype/internal/scan" ) -var ( - // HTML matches a Hypertext Markup Language file. - HTML = markup( +// HTML matches a Hypertext Markup Language file. 
+func HTML(raw []byte, _ uint32) bool { + return markup(raw, []byte(" 0 && firstNonWS == '[') + hasTargetTok = hasTargetTok || (t&json.TokObject > 0 && firstNonWS == '{') + } + if !hasTargetTok { return false } lraw := len(raw) - parsed, inspected, firstToken, querySatisfied := json.Parse(q, raw) - if !querySatisfied || firstToken&wantTok == 0 { + parsed, inspected, _, querySatisfied := json.Parse(q, raw) + if !querySatisfied { return false } @@ -244,7 +376,7 @@ func jsonHelper(raw []byte, limit uint32, q string, wantTok int) bool { // If a section of the file was provided, check if all of it was inspected. // In other words, check that if there was a problem parsing, that problem - // occured at the last byte in the input. + // occurred at the last byte in the input. return inspected == lraw && lraw > 0 } @@ -294,11 +426,12 @@ func svgWithoutXMLDeclaration(s scan.Bytes) bool { return false } - targetName, targetVal := "xmlns", "http://www.w3.org/2000/svg" - aName, aVal, hasMore := "", "", true + targetName, targetVal := []byte("xmlns"), []byte("http://www.w3.org/2000/svg") + var aName, aVal []byte + hasMore := true for hasMore { aName, aVal, hasMore = mkup.GetAnAttribute(&s) - if aName == targetName && aVal == targetVal { + if bytes.Equal(aName, targetName) && bytes.Equal(aVal, targetVal) { return true } if !hasMore { @@ -325,10 +458,11 @@ func svgWithXMLDeclaration(s scan.Bytes) bool { // version is a required attribute for XML. 
hasVersion := false - aName, hasMore := "", true + var aName []byte + hasMore := true for hasMore { aName, _, hasMore = mkup.GetAnAttribute(&s) - if aName == "version" { + if bytes.Equal(aName, []byte("version")) { hasVersion = true break } @@ -409,3 +543,57 @@ func Vtt(raw []byte, limit uint32) bool { return bytes.Equal(raw, []byte{0xEF, 0xBB, 0xBF, 0x57, 0x45, 0x42, 0x56, 0x54, 0x54}) || // UTF-8 BOM and "WEBVTT" bytes.Equal(raw, []byte{0x57, 0x45, 0x42, 0x56, 0x54, 0x54}) // "WEBVTT" } + +type rfc822Hint struct { + h []byte + matchFlags scan.Flags +} + +// The hints come from libmagic, but the implementation is bit different. libmagic +// only checks if the file starts with the hint, while we additionally look for +// a secondary hint in the first few lines of input. +func RFC822(raw []byte, limit uint32) bool { + b := scan.Bytes(raw) + + // Keep hints here to avoid instantiating them several times in lineHasRFC822Hint. + // The alternative is to make them a package level var, but then they'd go + // on the heap. + // Some of the hints are IgnoreCase, some not. I selected based on what libmagic + // does and based on personal observations from sample files. + hints := []rfc822Hint{ + {[]byte("From: "), 0}, + {[]byte("To: "), 0}, + {[]byte("CC: "), scan.IgnoreCase}, + {[]byte("Date: "), 0}, + {[]byte("Subject: "), 0}, + {[]byte("Received: "), 0}, + {[]byte("Relay-Version: "), 0}, + {[]byte("#! rnews"), 0}, + {[]byte("N#! 
rnews"), 0}, + {[]byte("Forward to"), 0}, + {[]byte("Pipe to"), 0}, + {[]byte("DELIVERED-TO: "), scan.IgnoreCase}, + {[]byte("RETURN-PATH: "), scan.IgnoreCase}, + {[]byte("Content-Type: "), 0}, + {[]byte("Content-Transfer-Encoding: "), 0}, + } + if !lineHasRFC822Hint(b.Line(), hints) { + return false + } + for i := 0; i < 20; i++ { + if lineHasRFC822Hint(b.Line(), hints) { + return true + } + } + + return false +} + +func lineHasRFC822Hint(b scan.Bytes, hints []rfc822Hint) bool { + for _, h := range hints { + if b.Match(h.h, h.matchFlags) > -1 { + return true + } + } + return false +} diff --git a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/video.go b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/video.go index 9caf55538a3..23e30da2b97 100644 --- a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/video.go +++ b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/video.go @@ -4,17 +4,23 @@ import ( "bytes" ) -var ( - // Flv matches a Flash video file. - Flv = prefix([]byte("\x46\x4C\x56\x01")) - // Asf matches an Advanced Systems Format file. - Asf = prefix([]byte{ +// Flv matches a Flash video file. +func Flv(raw []byte, _ uint32) bool { + return bytes.HasPrefix(raw, []byte("\x46\x4C\x56\x01")) +} + +// Asf matches an Advanced Systems Format file. +func Asf(raw []byte, _ uint32) bool { + return bytes.HasPrefix(raw, []byte{ 0x30, 0x26, 0xB2, 0x75, 0x8E, 0x66, 0xCF, 0x11, 0xA6, 0xD9, 0x00, 0xAA, 0x00, 0x62, 0xCE, 0x6C, }) - // Rmvb matches a RealMedia Variable Bitrate file. - Rmvb = prefix([]byte{0x2E, 0x52, 0x4D, 0x46}) -) +} + +// Rmvb matches a RealMedia Variable Bitrate file. +func Rmvb(raw []byte, _ uint32) bool { + return bytes.HasPrefix(raw, []byte{0x2E, 0x52, 0x4D, 0x46}) +} // WebM matches a WebM file. 
func WebM(raw []byte, limit uint32) bool { @@ -63,9 +69,9 @@ func isFileTypeNamePresent(in []byte, flType string) bool { // vintWidth parses the variable-integer width in matroska containers func vintWidth(v int) int { - mask, max, num := 128, 8, 1 - for num < max && v&mask == 0 { - mask = mask >> 1 + mask, nTimes, num := 128, 8, 1 + for num < nTimes && v&mask == 0 { + mask >>= 1 num++ } return num diff --git a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/zip.go b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/zip.go index 17750e6e6f4..f3bfa2ac37a 100644 --- a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/zip.go +++ b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/zip.go @@ -6,32 +6,65 @@ import ( "github.com/gabriel-vasile/mimetype/internal/scan" ) -var ( - // Odt matches an OpenDocument Text file. - Odt = offset([]byte("mimetypeapplication/vnd.oasis.opendocument.text"), 30) - // Ott matches an OpenDocument Text Template file. - Ott = offset([]byte("mimetypeapplication/vnd.oasis.opendocument.text-template"), 30) - // Ods matches an OpenDocument Spreadsheet file. - Ods = offset([]byte("mimetypeapplication/vnd.oasis.opendocument.spreadsheet"), 30) - // Ots matches an OpenDocument Spreadsheet Template file. - Ots = offset([]byte("mimetypeapplication/vnd.oasis.opendocument.spreadsheet-template"), 30) - // Odp matches an OpenDocument Presentation file. - Odp = offset([]byte("mimetypeapplication/vnd.oasis.opendocument.presentation"), 30) - // Otp matches an OpenDocument Presentation Template file. - Otp = offset([]byte("mimetypeapplication/vnd.oasis.opendocument.presentation-template"), 30) - // Odg matches an OpenDocument Drawing file. - Odg = offset([]byte("mimetypeapplication/vnd.oasis.opendocument.graphics"), 30) - // Otg matches an OpenDocument Drawing Template file. - Otg = offset([]byte("mimetypeapplication/vnd.oasis.opendocument.graphics-template"), 30) - // Odf matches an OpenDocument Formula file. 
- Odf = offset([]byte("mimetypeapplication/vnd.oasis.opendocument.formula"), 30) - // Odc matches an OpenDocument Chart file. - Odc = offset([]byte("mimetypeapplication/vnd.oasis.opendocument.chart"), 30) - // Epub matches an EPUB file. - Epub = offset([]byte("mimetypeapplication/epub+zip"), 30) - // Sxc matches an OpenOffice Spreadsheet file. - Sxc = offset([]byte("mimetypeapplication/vnd.sun.xml.calc"), 30) -) +// Odt matches an OpenDocument Text file. +func Odt(raw []byte, _ uint32) bool { + return offset(raw, []byte("mimetypeapplication/vnd.oasis.opendocument.text"), 30) +} + +// Ott matches an OpenDocument Text Template file. +func Ott(raw []byte, _ uint32) bool { + return offset(raw, []byte("mimetypeapplication/vnd.oasis.opendocument.text-template"), 30) +} + +// Ods matches an OpenDocument Spreadsheet file. +func Ods(raw []byte, _ uint32) bool { + return offset(raw, []byte("mimetypeapplication/vnd.oasis.opendocument.spreadsheet"), 30) +} + +// Ots matches an OpenDocument Spreadsheet Template file. +func Ots(raw []byte, _ uint32) bool { + return offset(raw, []byte("mimetypeapplication/vnd.oasis.opendocument.spreadsheet-template"), 30) +} + +// Odp matches an OpenDocument Presentation file. +func Odp(raw []byte, _ uint32) bool { + return offset(raw, []byte("mimetypeapplication/vnd.oasis.opendocument.presentation"), 30) +} + +// Otp matches an OpenDocument Presentation Template file. +func Otp(raw []byte, _ uint32) bool { + return offset(raw, []byte("mimetypeapplication/vnd.oasis.opendocument.presentation-template"), 30) +} + +// Odg matches an OpenDocument Drawing file. +func Odg(raw []byte, _ uint32) bool { + return offset(raw, []byte("mimetypeapplication/vnd.oasis.opendocument.graphics"), 30) +} + +// Otg matches an OpenDocument Drawing Template file. +func Otg(raw []byte, _ uint32) bool { + return offset(raw, []byte("mimetypeapplication/vnd.oasis.opendocument.graphics-template"), 30) +} + +// Odf matches an OpenDocument Formula file. 
+func Odf(raw []byte, _ uint32) bool { + return offset(raw, []byte("mimetypeapplication/vnd.oasis.opendocument.formula"), 30) +} + +// Odc matches an OpenDocument Chart file. +func Odc(raw []byte, _ uint32) bool { + return offset(raw, []byte("mimetypeapplication/vnd.oasis.opendocument.chart"), 30) +} + +// Epub matches an EPUB file. +func Epub(raw []byte, _ uint32) bool { + return offset(raw, []byte("mimetypeapplication/epub+zip"), 30) +} + +// Sxc matches an OpenOffice Spreadsheet file. +func Sxc(raw []byte, _ uint32) bool { + return offset(raw, []byte("mimetypeapplication/vnd.sun.xml.calc"), 30) +} // Zip matches a zip archive. func Zip(raw []byte, limit uint32) bool { @@ -52,10 +85,14 @@ func Zip(raw []byte, limit uint32) bool { // (instead of relying on offsets told by the file.) func Jar(raw []byte, limit uint32) bool { return executableJar(raw) || + // First entry must be an empty META-INF directory or the manifest. + // There is no specification saying that, but the jar reader and writer + // implementations from Java do it that way. + // https://github.com/openjdk/jdk/blob/88c4678eed818cbe9380f35352e90883fed27d33/src/java.base/share/classes/java/util/jar/JarInputStream.java#L170-L173 zipHas(raw, zipEntries{{ - name: []byte("META-INF/MANIFEST.MF"), - }, { name: []byte("META-INF/"), + }, { + name: []byte("META-INF/MANIFEST.MF"), }}, 1) } @@ -94,11 +131,14 @@ type zipEntries []struct { func (z zipEntries) match(file []byte) bool { for i := range z { - if z[i].dir && bytes.HasPrefix(file, z[i].name) { - return true - } - if bytes.Equal(file, z[i].name) { - return true + if z[i].dir { + if bytes.HasPrefix(file, z[i].name) { + return true + } + } else { + if bytes.Equal(file, z[i].name) { + return true + } } } return false @@ -134,11 +174,11 @@ func msoxml(raw scan.Bytes, searchFor zipEntries, stopAfter int) bool { // If the first is not one of the next usually expected entries, // then abort this check. 
if i == 0 { - if !bytes.Equal(f, []byte("[Content_Types].xml")) && - !bytes.Equal(f, []byte("_rels/.rels")) && - !bytes.Equal(f, []byte("docProps")) && - !bytes.Equal(f, []byte("customXml")) && - !bytes.Equal(f, []byte("[trash]")) { + if !bytes.Equal(f, []byte("[Content_Types].xml")) && // this is a file + !bytes.HasPrefix(f, []byte("_rels/")) && // these are directories + !bytes.HasPrefix(f, []byte("docProps/")) && + !bytes.HasPrefix(f, []byte("customXml/")) && + !bytes.HasPrefix(f, []byte("[trash]/")) { return false } } diff --git a/vendor/github.com/gabriel-vasile/mimetype/internal/markup/markup.go b/vendor/github.com/gabriel-vasile/mimetype/internal/markup/markup.go index 937fa1da591..b9abc6dab19 100644 --- a/vendor/github.com/gabriel-vasile/mimetype/internal/markup/markup.go +++ b/vendor/github.com/gabriel-vasile/mimetype/internal/markup/markup.go @@ -8,46 +8,48 @@ import ( "github.com/gabriel-vasile/mimetype/internal/scan" ) -func GetAnAttribute(s *scan.Bytes) (name, val string, hasMore bool) { +// GetAnAttribute assumes we passed over an SGML tag and extracts first +// attribute and its value. +// +// Initially, this code existed inside charset/charset.go, because it was part of +// implementing the https://html.spec.whatwg.org/multipage/parsing.html#prescan-a-byte-stream-to-determine-its-encoding +// algorithm. But because extracting an attribute from a tag is the same for +// both HTML and XML, then the code was moved here. +func GetAnAttribute(s *scan.Bytes) (name, val []byte, hasMore bool) { for scan.ByteIsWS(s.Peek()) || s.Peek() == '/' { s.Advance(1) } if s.Peek() == '>' { - return "", "", false + return nil, nil, false } - // Allocate 10 to avoid resizes. - // Attribute names and values are continuous slices of bytes in input, - // so we could do without allocating and returning slices of input. - nameB := make([]byte, 0, 10) + origS, end := *s, 0 // step 4 and 5 for { // bap means byte at position in the specification. 
bap := s.Pop() if bap == 0 { - return "", "", false + return nil, nil, false } - if bap == '=' && len(nameB) > 0 { + if bap == '=' && end > 0 { val, hasMore := getAValue(s) - return string(nameB), string(val), hasMore + return origS[:end], val, hasMore } else if scan.ByteIsWS(bap) { for scan.ByteIsWS(s.Peek()) { s.Advance(1) } if s.Peek() != '=' { - return string(nameB), "", true + return origS[:end], nil, true } s.Advance(1) for scan.ByteIsWS(s.Peek()) { s.Advance(1) } val, hasMore := getAValue(s) - return string(nameB), string(val), hasMore + return origS[:end], val, hasMore } else if bap == '/' || bap == '>' { - return string(nameB), "", false - } else if bap >= 'A' && bap <= 'Z' { - nameB = append(nameB, bap+0x20) - } else { - nameB = append(nameB, bap) + return origS[:end], nil, false + } else { // for any ASCII, non-ASCII, just advance + end++ } } } diff --git a/vendor/github.com/gabriel-vasile/mimetype/internal/scan/bytes.go b/vendor/github.com/gabriel-vasile/mimetype/internal/scan/bytes.go index 9f09f0781c0..552b4ead908 100644 --- a/vendor/github.com/gabriel-vasile/mimetype/internal/scan/bytes.go +++ b/vendor/github.com/gabriel-vasile/mimetype/internal/scan/bytes.go @@ -35,6 +35,19 @@ func (b *Bytes) TrimRWS() { } } +// FirstNonWS returns the first non-whitespace character from b, +// or 0x00 if no such character is found. +func (b Bytes) FirstNonWS() byte { + for i := range b { + if ByteIsWS(b[i]) { + continue + } + return b[i] + } + + return 0x00 +} + // Peek one byte from b or 0x00 if b is empty. func (b *Bytes) Peek() byte { if len(*b) > 0 { @@ -63,8 +76,8 @@ func (b *Bytes) PopN(n int) []byte { return nil } -// PopUntil will advance b until, but not including, the first occurence of stopAt -// character. If no occurence is found, then it will advance until the end of b. +// PopUntil will advance b until, but not including, the first occurrence of stopAt +// character. If no occurrence is found, then it will advance until the end of b. 
// The returned Bytes is a slice of all the bytes that we're advanced over. func (b *Bytes) PopUntil(stopAt ...byte) Bytes { if len(*b) == 0 { @@ -77,7 +90,7 @@ func (b *Bytes) PopUntil(stopAt ...byte) Bytes { prefix := (*b)[:i] *b = (*b)[i:] - return Bytes(prefix) + return prefix } // ReadSlice is the same as PopUntil, but the returned value includes stopAt as well. @@ -94,7 +107,7 @@ func (b *Bytes) ReadSlice(stopAt byte) Bytes { prefix := (*b)[:i] *b = (*b)[i:] - return Bytes(prefix) + return prefix } // Line returns the first line from b and advances b with the length of the @@ -117,7 +130,7 @@ func (b *Bytes) Line() Bytes { // If b length is less than readLimit, it means we received an incomplete file // and proceed with dropping the last line. func (b *Bytes) DropLastLine(readLimit uint32) { - if readLimit == 0 || uint32(len(*b)) < readLimit { + if readLimit == 0 || uint64(len(*b)) < uint64(readLimit) { return } @@ -138,46 +151,85 @@ func (b *Bytes) Uint16() (uint16, bool) { return v, true } +type Flags int + const ( - CompactWS = 1 << iota + // CompactWS will make one whitespace from pattern to match one or more spaces from input. + CompactWS Flags = 1 << iota + // IgnoreCase will match lower case from pattern with lower case from input. + // IgnoreCase will match upper case from pattern with both lower and upper case from input. + // This flag is not really well named, IgnoreCase + // FullWord ensures the input ends with a full word (it's followed by spaces.) + FullWord ) -// Search for occurences of pattern p inside b at any index. -func (b Bytes) Search(p []byte, flags int) int { +// Search for occurrences of pattern p inside b at any index. +// It returns the index where p was found in b and how many bytes were needed +// for matching the pattern. 
+func (b Bytes) Search(p []byte, flags Flags) (i int, l int) { + lb, lp := len(b), len(p) + if lp == 0 { + return 0, 0 + } + if lb == 0 { + return -1, 0 + } if flags == 0 { - return bytes.Index(b, p) + if i = bytes.Index(b, p); i == -1 { + return -1, 0 + } else { + return i, lp + } } - lb, lp := len(b), len(p) for i := range b { if lb-i < lp { - return -1 + return -1, 0 } - if b[i:].Match(p, flags) { - return i + if l = b[i:].Match(p, flags); l != -1 { + return i, l } } - return 0 + return -1, 0 } -// Match pattern p at index 0 of b. -func (b Bytes) Match(p []byte, flags int) bool { +// Match returns how many bytes were needed to match pattern p. +// It returns -1 if p does not match b. +func (b Bytes) Match(p []byte, flags Flags) int { + l := len(b) + if len(p) == 0 { + return 0 + } + if l == 0 { + return -1 + } + // If no flags, or scanning for full word at the end of pattern then + // do a fast HasPrefix check. + // For other flags it's not possible to use HasPrefix. + if flags == 0 || flags&FullWord > 0 { + if bytes.HasPrefix(b, p) { + b = b[len(p):] + p = p[len(p):] + goto out + } + return -1 + } for len(b) > 0 { - // If we finished all we we're looking for from p. + // If we finished all we were looking for from p. if len(p) == 0 { - return true + goto out } if flags&IgnoreCase > 0 && isUpper(p[0]) { if upper(b[0]) != p[0] { - return false + return -1 } b, p = b[1:], p[1:] } else if flags&CompactWS > 0 && ByteIsWS(p[0]) { p = p[1:] if !ByteIsWS(b[0]) { - return false + return -1 } b = b[1:] if !ByteIsWS(p[0]) { @@ -185,12 +237,22 @@ func (b Bytes) Match(p []byte, flags int) bool { } } else { if b[0] != p[0] { - return false + return -1 } b, p = b[1:], p[1:] } } - return true +out: + // If p still has leftover characters, it means it didn't fully match b. 
+ if len(p) > 0 { + return -1 + } + if flags&FullWord > 0 { + if len(b) > 0 && !ByteIsWS(b[0]) { + return -1 + } + } + return l - len(b) } func isUpper(c byte) bool { diff --git a/vendor/github.com/gabriel-vasile/mimetype/mime.go b/vendor/github.com/gabriel-vasile/mimetype/mime.go index b82627a8b83..3dadf720a73 100644 --- a/vendor/github.com/gabriel-vasile/mimetype/mime.go +++ b/vendor/github.com/gabriel-vasile/mimetype/mime.go @@ -2,6 +2,8 @@ package mimetype import ( "mime" + "slices" + "strings" "github.com/gabriel-vasile/mimetype/internal/charset" "github.com/gabriel-vasile/mimetype/internal/magic" @@ -57,10 +59,8 @@ func (m *MIME) Is(expectedMIME string) bool { return true } - for _, alias := range m.aliases { - if alias == expectedMIME { - return true - } + if slices.Contains(m.aliases, expectedMIME) { + return true } return false @@ -109,7 +109,7 @@ func (m *MIME) match(in []byte, readLimit uint32) *MIME { // Limit the number of bytes searched for to 1024. charset = f(in[:min(len(in), 1024)]) } - if m == root { + if m == root || charset == "" { return m } @@ -126,6 +126,27 @@ func (m *MIME) flatten() []*MIME { return out } +// hierarchy returns an easy to read list of ancestors for m. +// For example, application/json would return json>txt>root. +func (m *MIME) hierarchy() string { + h := "" + for m := m; m != nil; m = m.Parent() { + e := strings.TrimPrefix(m.Extension(), ".") + if e == "" { + // There are some MIME without extensions. When generating the hierarchy, + // it would be confusing to use empty string as extension. + // Use the subtype instead; ex: application/x-executable -> x-executable. + e = strings.Split(m.String(), "/")[1] + if m.Is("application/octet-stream") { + // for octet-stream use root, because it's short and used in many places + e = "root" + } + } + h += ">" + e + } + return strings.TrimPrefix(h, ">") +} + // clone creates a new MIME with the provided optional MIME parameters. 
func (m *MIME) clone(charset string) *MIME { clonedMIME := m.mime @@ -155,10 +176,11 @@ func (m *MIME) cloneHierarchy(charset string) *MIME { } func (m *MIME) lookup(mime string) *MIME { - for _, n := range append(m.aliases, m.mime) { - if n == mime { - return m - } + if mime == m.mime { + return m + } + if slices.Contains(m.aliases, mime) { + return m } for _, c := range m.children { diff --git a/vendor/github.com/gabriel-vasile/mimetype/mimetype.go b/vendor/github.com/gabriel-vasile/mimetype/mimetype.go index d8d512b8062..792741732b7 100644 --- a/vendor/github.com/gabriel-vasile/mimetype/mimetype.go +++ b/vendor/github.com/gabriel-vasile/mimetype/mimetype.go @@ -12,7 +12,7 @@ import ( "sync/atomic" ) -var defaultLimit uint32 = 3072 +const defaultLimit uint32 = 3072 // readLimit is the maximum number of bytes from the input used when detecting. var readLimit uint32 = defaultLimit @@ -112,15 +112,18 @@ func SetLimit(limit uint32) { } // Extend adds detection for other file formats. -// It is equivalent to calling Extend() on the root mime type "application/octet-stream". +// It is equivalent to calling Extend() on the root MIME type "application/octet-stream". func Extend(detector func(raw []byte, limit uint32) bool, mime, extension string, aliases ...string) { root.Extend(detector, mime, extension, aliases...) } // Lookup finds a MIME object by its string representation. -// The representation can be the main mime type, or any of its aliases. -func Lookup(mime string) *MIME { +// The representation can be the main MIME type, or any of its aliases. +func Lookup(m string) *MIME { + // We store the MIME types without optional params, so + // perform parsing to extract the target MIME type without optional params. 
+ m, _, _ = mime.ParseMediaType(m) mu.RLock() defer mu.RUnlock() - return root.lookup(mime) + return root.lookup(m) } diff --git a/vendor/github.com/gabriel-vasile/mimetype/supported_mimes.md b/vendor/github.com/gabriel-vasile/mimetype/supported_mimes.md index 3186a8bf0b9..45de7b9e339 100644 --- a/vendor/github.com/gabriel-vasile/mimetype/supported_mimes.md +++ b/vendor/github.com/gabriel-vasile/mimetype/supported_mimes.md @@ -1,196 +1,200 @@ -## 191 Supported MIME types +## 195 Supported MIME types This file is automatically generated when running tests. Do not edit manually. -Extension | MIME type | Aliases ---------- | --------- | ------- -**n/a** | application/octet-stream | - -**.xpm** | image/x-xpixmap | - -**.7z** | application/x-7z-compressed | - -**.zip** | application/zip | application/x-zip, application/x-zip-compressed -**.docx** | application/vnd.openxmlformats-officedocument.wordprocessingml.document | - -**.pptx** | application/vnd.openxmlformats-officedocument.presentationml.presentation | - -**.xlsx** | application/vnd.openxmlformats-officedocument.spreadsheetml.sheet | - -**.epub** | application/epub+zip | - -**.apk** | application/vnd.android.package-archive | - -**.jar** | application/java-archive | application/jar, application/jar-archive, application/x-java-archive -**.odt** | application/vnd.oasis.opendocument.text | application/x-vnd.oasis.opendocument.text -**.ott** | application/vnd.oasis.opendocument.text-template | application/x-vnd.oasis.opendocument.text-template -**.ods** | application/vnd.oasis.opendocument.spreadsheet | application/x-vnd.oasis.opendocument.spreadsheet -**.ots** | application/vnd.oasis.opendocument.spreadsheet-template | application/x-vnd.oasis.opendocument.spreadsheet-template -**.odp** | application/vnd.oasis.opendocument.presentation | application/x-vnd.oasis.opendocument.presentation -**.otp** | application/vnd.oasis.opendocument.presentation-template | application/x-vnd.oasis.opendocument.presentation-template 
-**.odg** | application/vnd.oasis.opendocument.graphics | application/x-vnd.oasis.opendocument.graphics -**.otg** | application/vnd.oasis.opendocument.graphics-template | application/x-vnd.oasis.opendocument.graphics-template -**.odf** | application/vnd.oasis.opendocument.formula | application/x-vnd.oasis.opendocument.formula -**.odc** | application/vnd.oasis.opendocument.chart | application/x-vnd.oasis.opendocument.chart -**.sxc** | application/vnd.sun.xml.calc | - -**.kmz** | application/vnd.google-earth.kmz | - -**.vsdx** | application/vnd.ms-visio.drawing.main+xml | - -**.pdf** | application/pdf | application/x-pdf -**.fdf** | application/vnd.fdf | - -**n/a** | application/x-ole-storage | - -**.msi** | application/x-ms-installer | application/x-windows-installer, application/x-msi -**.aaf** | application/octet-stream | - -**.msg** | application/vnd.ms-outlook | - -**.xls** | application/vnd.ms-excel | application/msexcel -**.pub** | application/vnd.ms-publisher | - -**.ppt** | application/vnd.ms-powerpoint | application/mspowerpoint -**.doc** | application/msword | application/vnd.ms-word -**.ps** | application/postscript | - -**.psd** | image/vnd.adobe.photoshop | image/x-psd, application/photoshop -**.p7s** | application/pkcs7-signature | - -**.ogg** | application/ogg | application/x-ogg -**.oga** | audio/ogg | - -**.ogv** | video/ogg | - -**.png** | image/png | - -**.png** | image/vnd.mozilla.apng | - -**.jpg** | image/jpeg | - -**.jxl** | image/jxl | - -**.jp2** | image/jp2 | - -**.jpf** | image/jpx | - -**.jpm** | image/jpm | video/jpm -**.jxs** | image/jxs | - -**.gif** | image/gif | - -**.webp** | image/webp | - -**.exe** | application/vnd.microsoft.portable-executable | - -**n/a** | application/x-elf | - -**n/a** | application/x-object | - -**n/a** | application/x-executable | - -**.so** | application/x-sharedlib | - -**n/a** | application/x-coredump | - -**.a** | application/x-archive | application/x-unix-archive -**.deb** | 
application/vnd.debian.binary-package | - -**.tar** | application/x-tar | - -**.xar** | application/x-xar | - -**.bz2** | application/x-bzip2 | - -**.fits** | application/fits | image/fits -**.tiff** | image/tiff | - -**.bmp** | image/bmp | image/x-bmp, image/x-ms-bmp -**.123** | application/vnd.lotus-1-2-3 | - -**.ico** | image/x-icon | - -**.mp3** | audio/mpeg | audio/x-mpeg, audio/mp3 -**.flac** | audio/flac | - -**.midi** | audio/midi | audio/mid, audio/sp-midi, audio/x-mid, audio/x-midi -**.ape** | audio/ape | - -**.mpc** | audio/musepack | - -**.amr** | audio/amr | audio/amr-nb -**.wav** | audio/wav | audio/x-wav, audio/vnd.wave, audio/wave -**.aiff** | audio/aiff | audio/x-aiff -**.au** | audio/basic | - -**.mpeg** | video/mpeg | - -**.mov** | video/quicktime | - -**.mp4** | video/mp4 | - -**.avif** | image/avif | - -**.3gp** | video/3gpp | video/3gp, audio/3gpp -**.3g2** | video/3gpp2 | video/3g2, audio/3gpp2 -**.mp4** | audio/mp4 | audio/x-mp4a -**.mqv** | video/quicktime | - -**.m4a** | audio/x-m4a | - -**.m4v** | video/x-m4v | - -**.heic** | image/heic | - -**.heic** | image/heic-sequence | - -**.heif** | image/heif | - -**.heif** | image/heif-sequence | - -**.mj2** | video/mj2 | - -**.dvb** | video/vnd.dvb.file | - -**.webm** | video/webm | audio/webm -**.avi** | video/x-msvideo | video/avi, video/msvideo -**.flv** | video/x-flv | - -**.mkv** | video/x-matroska | - -**.asf** | video/x-ms-asf | video/asf, video/x-ms-wmv -**.aac** | audio/aac | - -**.voc** | audio/x-unknown | - -**.m3u** | application/vnd.apple.mpegurl | audio/mpegurl -**.rmvb** | application/vnd.rn-realmedia-vbr | - -**.gz** | application/gzip | application/x-gzip, application/x-gunzip, application/gzipped, application/gzip-compressed, application/x-gzip-compressed, gzip/document -**.class** | application/x-java-applet | - -**.swf** | application/x-shockwave-flash | - -**.crx** | application/x-chrome-extension | - -**.ttf** | font/ttf | font/sfnt, application/x-font-ttf, 
application/font-sfnt -**.woff** | font/woff | - -**.woff2** | font/woff2 | - -**.otf** | font/otf | - -**.ttc** | font/collection | - -**.eot** | application/vnd.ms-fontobject | - -**.wasm** | application/wasm | - -**.shx** | application/vnd.shx | - -**.shp** | application/vnd.shp | - -**.dbf** | application/x-dbf | - -**.dcm** | application/dicom | - -**.rar** | application/x-rar-compressed | application/x-rar -**.djvu** | image/vnd.djvu | - -**.mobi** | application/x-mobipocket-ebook | - -**.lit** | application/x-ms-reader | - -**.bpg** | image/bpg | - -**.cbor** | application/cbor | - -**.sqlite** | application/vnd.sqlite3 | application/x-sqlite3 -**.dwg** | image/vnd.dwg | image/x-dwg, application/acad, application/x-acad, application/autocad_dwg, application/dwg, application/x-dwg, application/x-autocad, drawing/dwg -**.nes** | application/vnd.nintendo.snes.rom | - -**.lnk** | application/x-ms-shortcut | - -**.macho** | application/x-mach-binary | - -**.qcp** | audio/qcelp | - -**.icns** | image/x-icns | - -**.hdr** | image/vnd.radiance | - -**.mrc** | application/marc | - -**.mdb** | application/x-msaccess | - -**.accdb** | application/x-msaccess | - -**.zst** | application/zstd | - -**.cab** | application/vnd.ms-cab-compressed | - -**.rpm** | application/x-rpm | - -**.xz** | application/x-xz | - -**.lz** | application/lzip | application/x-lzip -**.torrent** | application/x-bittorrent | - -**.cpio** | application/x-cpio | - -**n/a** | application/tzif | - -**.xcf** | image/x-xcf | - -**.pat** | image/x-gimp-pat | - -**.gbr** | image/x-gimp-gbr | - -**.glb** | model/gltf-binary | - -**.cab** | application/x-installshield | - -**.jxr** | image/jxr | image/vnd.ms-photo -**.parquet** | application/vnd.apache.parquet | application/x-parquet -**.one** | application/onenote | - -**.chm** | application/vnd.ms-htmlhelp | - -**.txt** | text/plain | - -**.svg** | image/svg+xml | - -**.html** | text/html | - -**.xml** | text/xml | application/xml -**.rss** | 
application/rss+xml | text/rss -**.atom** | application/atom+xml | - -**.x3d** | model/x3d+xml | - -**.kml** | application/vnd.google-earth.kml+xml | - -**.xlf** | application/x-xliff+xml | - -**.dae** | model/vnd.collada+xml | - -**.gml** | application/gml+xml | - -**.gpx** | application/gpx+xml | - -**.tcx** | application/vnd.garmin.tcx+xml | - -**.amf** | application/x-amf | - -**.3mf** | application/vnd.ms-package.3dmanufacturing-3dmodel+xml | - -**.xfdf** | application/vnd.adobe.xfdf | - -**.owl** | application/owl+xml | - -**.html** | application/xhtml+xml | - -**.php** | text/x-php | - -**.js** | text/javascript | application/x-javascript, application/javascript -**.lua** | text/x-lua | - -**.pl** | text/x-perl | - -**.py** | text/x-python | text/x-script.python, application/x-python -**.rb** | text/x-ruby | application/x-ruby -**.json** | application/json | - -**.geojson** | application/geo+json | - -**.har** | application/json | - -**.gltf** | model/gltf+json | - -**.ndjson** | application/x-ndjson | - -**.rtf** | text/rtf | application/rtf -**.srt** | application/x-subrip | application/x-srt, text/x-srt -**.tcl** | text/x-tcl | application/x-tcl -**.csv** | text/csv | - -**.tsv** | text/tab-separated-values | - -**.vcf** | text/vcard | - -**.ics** | text/calendar | - -**.warc** | application/warc | - -**.vtt** | text/vtt | - -**.sh** | text/x-shellscript | text/x-sh, application/x-shellscript, application/x-sh -**.pbm** | image/x-portable-bitmap | - -**.pgm** | image/x-portable-graymap | - -**.ppm** | image/x-portable-pixmap | - -**.pam** | image/x-portable-arbitrarymap | - +Extension | MIME type
Aliases | Hierarchy +--------- | ---------------------- | --------- +**n/a** | **application/octet-stream** | root +**.xpm** | **image/x-xpixmap** | xpm>root +**.7z** | **application/x-7z-compressed** | 7z>root +**.zip** | **application/zip**
application/x-zip, application/x-zip-compressed | zip>root +**.docx** | **application/vnd.openxmlformats-officedocument.wordprocessingml.document** | docx>zip>root +**.pptx** | **application/vnd.openxmlformats-officedocument.presentationml.presentation** | pptx>zip>root +**.xlsx** | **application/vnd.openxmlformats-officedocument.spreadsheetml.sheet** | xlsx>zip>root +**.epub** | **application/epub+zip** | epub>zip>root +**.apk** | **application/vnd.android.package-archive** | apk>zip>root +**.jar** | **application/java-archive**
application/jar, application/jar-archive, application/x-java-archive | jar>zip>root +**.odt** | **application/vnd.oasis.opendocument.text**
application/x-vnd.oasis.opendocument.text | odt>zip>root +**.ott** | **application/vnd.oasis.opendocument.text-template**
application/x-vnd.oasis.opendocument.text-template | ott>odt>zip>root +**.ods** | **application/vnd.oasis.opendocument.spreadsheet**
application/x-vnd.oasis.opendocument.spreadsheet | ods>zip>root +**.ots** | **application/vnd.oasis.opendocument.spreadsheet-template**
application/x-vnd.oasis.opendocument.spreadsheet-template | ots>ods>zip>root +**.odp** | **application/vnd.oasis.opendocument.presentation**
application/x-vnd.oasis.opendocument.presentation | odp>zip>root +**.otp** | **application/vnd.oasis.opendocument.presentation-template**
application/x-vnd.oasis.opendocument.presentation-template | otp>odp>zip>root +**.odg** | **application/vnd.oasis.opendocument.graphics**
application/x-vnd.oasis.opendocument.graphics | odg>zip>root +**.otg** | **application/vnd.oasis.opendocument.graphics-template**
application/x-vnd.oasis.opendocument.graphics-template | otg>odg>zip>root +**.odf** | **application/vnd.oasis.opendocument.formula**
application/x-vnd.oasis.opendocument.formula | odf>zip>root +**.odc** | **application/vnd.oasis.opendocument.chart**
application/x-vnd.oasis.opendocument.chart | odc>zip>root +**.sxc** | **application/vnd.sun.xml.calc** | sxc>zip>root +**.kmz** | **application/vnd.google-earth.kmz** | kmz>zip>root +**.vsdx** | **application/vnd.ms-visio.drawing.main+xml** | vsdx>zip>root +**.pdf** | **application/pdf**
application/x-pdf | pdf>root +**.fdf** | **application/vnd.fdf** | fdf>root +**n/a** | **application/x-ole-storage** | x-ole-storage>root +**.msi** | **application/x-ms-installer**
application/x-windows-installer, application/x-msi | msi>x-ole-storage>root +**.msg** | **application/vnd.ms-outlook** | msg>x-ole-storage>root +**.xls** | **application/vnd.ms-excel**
application/msexcel | xls>x-ole-storage>root +**.pub** | **application/vnd.ms-publisher** | pub>x-ole-storage>root +**.ppt** | **application/vnd.ms-powerpoint**
application/mspowerpoint | ppt>x-ole-storage>root +**.doc** | **application/msword**
application/vnd.ms-word | doc>x-ole-storage>root +**.ps** | **application/postscript** | ps>root +**.psd** | **image/vnd.adobe.photoshop**
image/x-psd, application/photoshop | psd>root +**.p7s** | **application/pkcs7-signature** | p7s>root +**.ogg** | **application/ogg**
application/x-ogg | ogg>root +**.oga** | **audio/ogg** | oga>ogg>root +**.ogv** | **video/ogg** | ogv>ogg>root +**.png** | **image/png** | png>root +**.png** | **image/vnd.mozilla.apng** | png>png>root +**.jpg** | **image/jpeg** | jpg>root +**.jxl** | **image/jxl** | jxl>root +**.jp2** | **image/jp2** | jp2>root +**.jpf** | **image/jpx** | jpf>root +**.jpm** | **image/jpm**
video/jpm | jpm>root +**.jxs** | **image/jxs** | jxs>root +**.gif** | **image/gif** | gif>root +**.webp** | **image/webp** | webp>root +**.exe** | **application/vnd.microsoft.portable-executable** | exe>root +**n/a** | **application/x-elf** | x-elf>root +**n/a** | **application/x-object** | x-object>x-elf>root +**n/a** | **application/x-executable** | x-executable>x-elf>root +**.so** | **application/x-sharedlib** | so>x-elf>root +**n/a** | **application/x-coredump** | x-coredump>x-elf>root +**.a** | **application/x-archive**
application/x-unix-archive | a>root +**.deb** | **application/vnd.debian.binary-package** | deb>a>root +**.tar** | **application/x-tar** | tar>root +**.xar** | **application/x-xar** | xar>root +**.bz2** | **application/x-bzip2** | bz2>root +**.fits** | **application/fits**
image/fits | fits>root +**.tiff** | **image/tiff** | tiff>root +**.bmp** | **image/bmp**
image/x-bmp, image/x-ms-bmp | bmp>root +**.123** | **application/vnd.lotus-1-2-3** | 123>root +**.ico** | **image/x-icon** | ico>root +**.mp3** | **audio/mpeg**
audio/x-mpeg, audio/mp3 | mp3>root +**.flac** | **audio/flac** | flac>root +**.midi** | **audio/midi**
audio/mid, audio/sp-midi, audio/x-mid, audio/x-midi | midi>root +**.ape** | **audio/ape** | ape>root +**.mpc** | **audio/musepack** | mpc>root +**.amr** | **audio/amr**
audio/amr-nb | amr>root +**.wav** | **audio/wav**
audio/x-wav, audio/vnd.wave, audio/wave | wav>root +**.aiff** | **audio/aiff**
audio/x-aiff | aiff>root +**.au** | **audio/basic** | au>root +**.mpeg** | **video/mpeg** | mpeg>root +**.mov** | **video/quicktime** | mov>root +**.mp4** | **video/mp4** | mp4>root +**.avif** | **image/avif** | avif>mp4>root +**.3gp** | **video/3gpp**
video/3gp, audio/3gpp | 3gp>mp4>root +**.3g2** | **video/3gpp2**
video/3g2, audio/3gpp2 | 3g2>mp4>root +**.mp4** | **audio/mp4**
audio/x-mp4a | mp4>mp4>root +**.mqv** | **video/quicktime** | mqv>mp4>root +**.m4a** | **audio/x-m4a** | m4a>mp4>root +**.m4v** | **video/x-m4v** | m4v>mp4>root +**.heic** | **image/heic** | heic>mp4>root +**.heic** | **image/heic-sequence** | heic>mp4>root +**.heif** | **image/heif** | heif>mp4>root +**.heif** | **image/heif-sequence** | heif>mp4>root +**.mj2** | **video/mj2** | mj2>mp4>root +**.dvb** | **video/vnd.dvb.file** | dvb>mp4>root +**.webm** | **video/webm**
audio/webm | webm>root +**.avi** | **video/x-msvideo**
video/avi, video/msvideo | avi>root +**.flv** | **video/x-flv** | flv>root +**.mkv** | **video/x-matroska** | mkv>root +**.asf** | **video/x-ms-asf**
video/asf, video/x-ms-wmv | asf>root +**.aac** | **audio/aac** | aac>root +**.voc** | **audio/x-unknown** | voc>root +**.m3u** | **application/vnd.apple.mpegurl**
audio/mpegurl | m3u>root +**.rmvb** | **application/vnd.rn-realmedia-vbr** | rmvb>root +**.gz** | **application/gzip**
application/x-gzip, application/x-gunzip, application/gzipped, application/gzip-compressed, application/x-gzip-compressed, gzip/document | gz>root +**.class** | **application/x-java-applet** | class>root +**.swf** | **application/x-shockwave-flash** | swf>root +**.crx** | **application/x-chrome-extension** | crx>root +**.ttf** | **font/ttf**
font/sfnt, application/x-font-ttf, application/font-sfnt | ttf>root +**.woff** | **font/woff** | woff>root +**.woff2** | **font/woff2** | woff2>root +**.otf** | **font/otf** | otf>root +**.ttc** | **font/collection** | ttc>root +**.eot** | **application/vnd.ms-fontobject** | eot>root +**.wasm** | **application/wasm** | wasm>root +**.shx** | **application/vnd.shx** | shx>root +**.shp** | **application/vnd.shp** | shp>shx>root +**.dbf** | **application/x-dbf** | dbf>root +**.dcm** | **application/dicom** | dcm>root +**.rar** | **application/x-rar-compressed**
application/x-rar | rar>root +**.djvu** | **image/vnd.djvu** | djvu>root +**.mobi** | **application/x-mobipocket-ebook** | mobi>root +**.lit** | **application/x-ms-reader** | lit>root +**.bpg** | **image/bpg** | bpg>root +**.cbor** | **application/cbor** | cbor>root +**.sqlite** | **application/vnd.sqlite3**
application/x-sqlite3 | sqlite>root +**.dwg** | **image/vnd.dwg**
image/x-dwg, application/acad, application/x-acad, application/autocad_dwg, application/dwg, application/x-dwg, application/x-autocad, drawing/dwg | dwg>root +**.nes** | **application/vnd.nintendo.snes.rom** | nes>root +**.lnk** | **application/x-ms-shortcut** | lnk>root +**.macho** | **application/x-mach-binary** | macho>root +**.qcp** | **audio/qcelp** | qcp>root +**.icns** | **image/x-icns** | icns>root +**.hdr** | **image/vnd.radiance** | hdr>root +**.mrc** | **application/marc** | mrc>root +**.mdb** | **application/x-msaccess** | mdb>root +**.accdb** | **application/x-msaccess** | accdb>root +**.zst** | **application/zstd** | zst>root +**.cab** | **application/vnd.ms-cab-compressed** | cab>root +**.rpm** | **application/x-rpm** | rpm>root +**.xz** | **application/x-xz** | xz>root +**.lz** | **application/lzip**
application/x-lzip | lz>root +**.torrent** | **application/x-bittorrent** | torrent>root +**.cpio** | **application/x-cpio** | cpio>root +**n/a** | **application/tzif** | tzif>root +**.xcf** | **image/x-xcf** | xcf>root +**.pat** | **image/x-gimp-pat** | pat>root +**.gbr** | **image/x-gimp-gbr** | gbr>root +**.glb** | **model/gltf-binary** | glb>root +**.cab** | **application/x-installshield** | cab>root +**.jxr** | **image/jxr**
image/vnd.ms-photo | jxr>root +**.parquet** | **application/vnd.apache.parquet**
application/x-parquet | parquet>root +**.one** | **application/onenote** | one>root +**.chm** | **application/vnd.ms-htmlhelp** | chm>root +**.wpd** | **application/vnd.wordperfect** | wpd>root +**.dxf** | **image/vnd.dxf** | dxf>root +**.grb** | **application/grib** | grb>root +**n/a** | **application/zlib** | zlib>root +**.txt** | **text/plain** | txt>root +**.svg** | **image/svg+xml** | svg>txt>root +**.html** | **text/html** | html>txt>root +**.xml** | **text/xml**
application/xml | xml>txt>root +**.rss** | **application/rss+xml**
text/rss | rss>xml>txt>root +**.atom** | **application/atom+xml** | atom>xml>txt>root +**.x3d** | **model/x3d+xml** | x3d>xml>txt>root +**.kml** | **application/vnd.google-earth.kml+xml** | kml>xml>txt>root +**.xlf** | **application/x-xliff+xml** | xlf>xml>txt>root +**.dae** | **model/vnd.collada+xml** | dae>xml>txt>root +**.gml** | **application/gml+xml** | gml>xml>txt>root +**.gpx** | **application/gpx+xml** | gpx>xml>txt>root +**.tcx** | **application/vnd.garmin.tcx+xml** | tcx>xml>txt>root +**.amf** | **application/x-amf** | amf>xml>txt>root +**.3mf** | **application/vnd.ms-package.3dmanufacturing-3dmodel+xml** | 3mf>xml>txt>root +**.xfdf** | **application/vnd.adobe.xfdf** | xfdf>xml>txt>root +**.owl** | **application/owl+xml** | owl>xml>txt>root +**.html** | **application/xhtml+xml** | html>xml>txt>root +**.php** | **text/x-php** | php>txt>root +**.js** | **text/javascript**
application/x-javascript, application/javascript | js>txt>root +**.lua** | **text/x-lua** | lua>txt>root +**.pl** | **text/x-perl** | pl>txt>root +**.py** | **text/x-python**
text/x-script.python, application/x-python | py>txt>root +**.rb** | **text/x-ruby**
application/x-ruby | rb>txt>root +**.json** | **application/json** | json>txt>root +**.geojson** | **application/geo+json** | geojson>json>txt>root +**.har** | **application/json** | har>json>txt>root +**.gltf** | **model/gltf+json** | gltf>json>txt>root +**.ndjson** | **application/x-ndjson** | ndjson>txt>root +**.rtf** | **text/rtf**
application/rtf | rtf>txt>root +**.srt** | **application/x-subrip**
application/x-srt, text/x-srt | srt>txt>root +**.tcl** | **text/x-tcl**
application/x-tcl | tcl>txt>root +**.csv** | **text/csv** | csv>txt>root +**.tsv** | **text/tab-separated-values** | tsv>txt>root +**.vcf** | **text/vcard** | vcf>txt>root +**.ics** | **text/calendar** | ics>txt>root +**.warc** | **application/warc** | warc>txt>root +**.vtt** | **text/vtt** | vtt>txt>root +**.sh** | **text/x-shellscript**
text/x-sh, application/x-shellscript, application/x-sh | sh>txt>root +**.pbm** | **image/x-portable-bitmap** | pbm>txt>root +**.pgm** | **image/x-portable-graymap** | pgm>txt>root +**.ppm** | **image/x-portable-pixmap** | ppm>txt>root +**.pam** | **image/x-portable-arbitrarymap** | pam>txt>root +**.eml** | **message/rfc822** | eml>txt>root diff --git a/vendor/github.com/gabriel-vasile/mimetype/tree.go b/vendor/github.com/gabriel-vasile/mimetype/tree.go index edbde89587b..29ef820b7fe 100644 --- a/vendor/github.com/gabriel-vasile/mimetype/tree.go +++ b/vendor/github.com/gabriel-vasile/mimetype/tree.go @@ -24,7 +24,7 @@ var root = newMIME("application/octet-stream", "", woff2, otf, ttc, eot, wasm, shx, dbf, dcm, rar, djvu, mobi, lit, bpg, cbor, sqlite3, dwg, nes, lnk, macho, qcp, icns, hdr, mrc, mdb, accdb, zstd, cab, rpm, xz, lzip, torrent, cpio, tzif, xcf, pat, gbr, glb, cabIS, jxr, parquet, - oneNote, chm, + oneNote, chm, wpd, dxf, grib, zlib, // Keep text last because it is the slowest check. text, ) @@ -65,10 +65,9 @@ var ( jar = newMIME("application/java-archive", ".jar", magic.Jar). alias("application/jar", "application/jar-archive", "application/x-java-archive") apk = newMIME("application/vnd.android.package-archive", ".apk", magic.APK) - ole = newMIME("application/x-ole-storage", "", magic.Ole, msi, aaf, msg, xls, pub, ppt, doc) + ole = newMIME("application/x-ole-storage", "", magic.Ole, msi, msg, xls, pub, ppt, doc) msi = newMIME("application/x-ms-installer", ".msi", magic.Msi). alias("application/x-windows-installer", "application/x-msi") - aaf = newMIME("application/octet-stream", ".aaf", magic.Aaf) doc = newMIME("application/msword", ".doc", magic.Doc). alias("application/vnd.ms-word") ppt = newMIME("application/vnd.ms-powerpoint", ".ppt", magic.Ppt). 
@@ -83,7 +82,7 @@ var ( alias("application/x-ogg") oggAudio = newMIME("audio/ogg", ".oga", magic.OggAudio) oggVideo = newMIME("video/ogg", ".ogv", magic.OggVideo) - text = newMIME("text/plain", ".txt", magic.Text, svg, html, xml, php, js, lua, perl, python, ruby, json, ndJSON, rtf, srt, tcl, csv, tsv, vCard, iCalendar, warc, vtt, shell, netpbm, netpgm, netppm, netpam) + text = newMIME("text/plain", ".txt", magic.Text, svg, html, xml, php, js, lua, perl, python, ruby, json, ndJSON, rtf, srt, tcl, csv, tsv, vCard, iCalendar, warc, vtt, shell, netpbm, netpgm, netppm, netpam, rfc822) xml = newMIME("text/xml", ".xml", magic.XML, rss, atom, x3d, kml, xliff, collada, gml, gpx, tcx, amf, threemf, xfdf, owl2, xhtml). alias("application/xml") xhtml = newMIME("application/xhtml+xml", ".html", magic.XHTML) @@ -286,4 +285,9 @@ var ( cbor = newMIME("application/cbor", ".cbor", magic.CBOR) oneNote = newMIME("application/onenote", ".one", magic.One) chm = newMIME("application/vnd.ms-htmlhelp", ".chm", magic.CHM) + wpd = newMIME("application/vnd.wordperfect", ".wpd", magic.WPD) + dxf = newMIME("image/vnd.dxf", ".dxf", magic.DXF) + rfc822 = newMIME("message/rfc822", ".eml", magic.RFC822) + grib = newMIME("application/grib", ".grb", magic.GRIB) + zlib = newMIME("application/zlib", "", magic.Zlib) ) diff --git a/vendor/github.com/go-playground/validator/v10/.golangci.yaml b/vendor/github.com/go-playground/validator/v10/.golangci.yaml index dd9c05cc8bf..96337d6cab2 100644 --- a/vendor/github.com/go-playground/validator/v10/.golangci.yaml +++ b/vendor/github.com/go-playground/validator/v10/.golangci.yaml @@ -32,6 +32,7 @@ linters: - maintidx - misspell - mnd + - modernize - nakedret - nestif - nilnil diff --git a/vendor/github.com/go-playground/validator/v10/README.md b/vendor/github.com/go-playground/validator/v10/README.md index cb5d419459b..c6012b58620 100644 --- a/vendor/github.com/go-playground/validator/v10/README.md +++ b/vendor/github.com/go-playground/validator/v10/README.md @@ 
-123,6 +123,7 @@ validate := validator.New(validator.WithRequiredStructEnabled()) | udp6_addr | User Datagram Protocol Address UDPv6 | | udp_addr | User Datagram Protocol Address UDP | | unix_addr | Unix domain socket end point Address | +| uds_exists | Unix domain socket exists (checks filesystem sockets and Linux abstract sockets) | | uri | URI String | | url | URL String | | http_url | HTTP(s) URL String | @@ -137,6 +138,7 @@ validate := validator.New(validator.WithRequiredStructEnabled()) | alpha | Alpha Only | | alphaspace | Alpha Space | | alphanum | Alphanumeric | +| alphanumspace | Alphanumeric Space | | alphanumunicode | Alphanumeric Unicode | | alphaunicode | Alpha Unicode | | ascii | ASCII | @@ -164,7 +166,8 @@ validate := validator.New(validator.WithRequiredStructEnabled()) | base64 | Base64 String | | base64url | Base64URL String | | base64rawurl | Base64RawURL String | -| bic | Business Identifier Code (ISO 9362) | +| bic_iso_9362_2014 | Business Identifier Code (ISO 9362:2014) | +| bic | Business Identifier Code (ISO 9362:2022) | | bcp47_language_tag | Language tag (BCP 47) | | btc_addr | Bitcoin Address | | btc_addr_bech32 | Bitcoin Bech32 Address (segwit) | diff --git a/vendor/github.com/go-playground/validator/v10/baked_in.go b/vendor/github.com/go-playground/validator/v10/baked_in.go index 8fd55e77ecf..7fcc6737a7a 100644 --- a/vendor/github.com/go-playground/validator/v10/baked_in.go +++ b/vendor/github.com/go-playground/validator/v10/baked_in.go @@ -1,6 +1,7 @@ package validator import ( + "bufio" "bytes" "cmp" "context" @@ -15,6 +16,7 @@ import ( "net/url" "os" "reflect" + "runtime" "strconv" "strings" "sync" @@ -120,6 +122,7 @@ var ( "alpha": isAlpha, "alphaspace": isAlphaSpace, "alphanum": isAlphanum, + "alphanumspace": isAlphaNumericSpace, "alphaunicode": isAlphaUnicode, "alphanumunicode": isAlphanumUnicode, "boolean": isBoolean, @@ -205,6 +208,7 @@ var ( "ip6_addr": isIP6AddrResolvable, "ip_addr": isIPAddrResolvable, "unix_addr": 
isUnixAddrResolvable, + "uds_exists": isUnixDomainSocketExists, "mac": isMAC, "hostname": isHostnameRFC952, // RFC 952 "hostname_rfc1123": isHostnameRFC1123, // RFC 1123 @@ -237,7 +241,8 @@ var ( "bcp47_language_tag": isBCP47LanguageTag, "postcode_iso3166_alpha2": isPostcodeByIso3166Alpha2, "postcode_iso3166_alpha2_field": isPostcodeByIso3166Alpha2Field, - "bic": isIsoBicFormat, + "bic_iso_9362_2014": isIsoBic2014Format, + "bic": isIsoBic2022Format, "semver": isSemverFormat, "dns_rfc1035_label": isDnsRFC1035LabelFormat, "credit_card": isCreditCard, @@ -533,12 +538,20 @@ func hasMultiByteCharacter(fl FieldLevel) bool { // isPrintableASCII is the validation function for validating if the field's value is a valid printable ASCII character. func isPrintableASCII(fl FieldLevel) bool { - return printableASCIIRegex().MatchString(fl.Field().String()) + field := fl.Field() + if field.Kind() == reflect.String { + return printableASCIIRegex().MatchString(field.String()) + } + return false } // isASCII is the validation function for validating if the field's value is a valid ASCII character. func isASCII(fl FieldLevel) bool { - return aSCIIRegex().MatchString(fl.Field().String()) + field := fl.Field() + if field.Kind() == reflect.String { + return aSCIIRegex().MatchString(field.String()) + } + return false } // isUUID5 is the validation function for validating if the field's value is a valid v5 UUID. @@ -1773,6 +1786,11 @@ func isAlphaSpace(fl FieldLevel) bool { return alphaSpaceRegex().MatchString(fl.Field().String()) } +// isAlphaNumericSpace is the validation function for validating if the current field's value is a valid alphanumeric value with spaces. +func isAlphaNumericSpace(fl FieldLevel) bool { + return alphanNumericSpaceRegex().MatchString(fl.Field().String()) +} + // isAlphaUnicode is the validation function for validating if the current field's value is a valid alpha unicode value. 
func isAlphaUnicode(fl FieldLevel) bool { return alphaUnicodeRegex().MatchString(fl.Field().String()) @@ -1974,11 +1992,12 @@ func excludedUnless(fl FieldLevel) bool { panic(fmt.Sprintf("Bad param number for excluded_unless %s", fl.FieldName())) } for i := 0; i < len(params); i += 2 { - if !requireCheckFieldValue(fl, params[i], params[i+1], false) { - return !hasValue(fl) + if requireCheckFieldValue(fl, params[i], params[i+1], false) { + return true } } - return true + + return !hasValue(fl) } // excludedWith is the validation function @@ -2595,6 +2614,70 @@ func isUnixAddrResolvable(fl FieldLevel) bool { return err == nil } +// isUnixDomainSocketExists is the validation function for validating if the field's value is an existing Unix domain socket. +// It handles both filesystem-based sockets and Linux abstract sockets. +// It always returns false for Windows. +func isUnixDomainSocketExists(fl FieldLevel) bool { + if runtime.GOOS == "windows" { + return false + } + + sockpath := fl.Field().String() + + if sockpath == "" { + return false + } + + // On Linux, check for abstract sockets (prefixed with @) + if runtime.GOOS == "linux" && strings.HasPrefix(sockpath, "@") { + return isAbstractSocketExists(sockpath) + } + + // For filesystem-based sockets, check if the path exists and is a socket + stats, err := os.Stat(sockpath) + if err != nil { + return false + } + + return stats.Mode().Type() == fs.ModeSocket +} + +// isAbstractSocketExists checks if a Linux abstract socket exists by reading /proc/net/unix. +// Abstract sockets are identified by an @ prefix in human-readable form. 
+func isAbstractSocketExists(sockpath string) bool { + file, err := os.Open("/proc/net/unix") + if err != nil { + return false + } + defer func() { + _ = file.Close() + }() + + scanner := bufio.NewScanner(file) + + // Skip the header line + if !scanner.Scan() { + return false + } + + // Abstract sockets in /proc/net/unix are represented with @ prefix + // The socket path is the last field in each line + for scanner.Scan() { + line := scanner.Text() + fields := strings.Fields(line) + + // The path is the last field (8th field typically) + if len(fields) >= 8 { + path := fields[len(fields)-1] + if path == sockpath { + return true + } + } + } + + return false +} + func isIP4Addr(fl FieldLevel) bool { val := fl.Field().String() @@ -2943,11 +3026,18 @@ func isBCP47LanguageTag(fl FieldLevel) bool { panic(fmt.Sprintf("Bad field type %s", field.Type())) } -// isIsoBicFormat is the validation function for validating if the current field's value is a valid Business Identifier Code (SWIFT code), defined in ISO 9362 -func isIsoBicFormat(fl FieldLevel) bool { +// isIsoBic2014Format is the validation function for validating if the current field's value is a valid Business Identifier Code (SWIFT code), defined in ISO 9362 2014 +func isIsoBic2014Format(fl FieldLevel) bool { bicString := fl.Field().String() - return bicRegex().MatchString(bicString) + return bic2014Regex().MatchString(bicString) +} + +// isIsoBic2022Format is the validation function for validating if the current field's value is a valid Business Identifier Code (SWIFT code), defined in ISO 9362 2022 +func isIsoBic2022Format(fl FieldLevel) bool { + bicString := fl.Field().String() + + return bic2022Regex().MatchString(bicString) } // isSemverFormat is the validation function for validating if the current field's value is a valid semver version, defined in Semantic Versioning 2.0.0 diff --git a/vendor/github.com/go-playground/validator/v10/cache.go b/vendor/github.com/go-playground/validator/v10/cache.go index 
fb101b064f9..ab7ffd47f8c 100644 --- a/vendor/github.com/go-playground/validator/v10/cache.go +++ b/vendor/github.com/go-playground/validator/v10/cache.go @@ -289,6 +289,24 @@ func (v *Validate) parseFieldTagsRecursive(tag string, fieldName string, alias s if wrapper, ok := v.validations[current.tag]; ok { current.fn = wrapper.fn current.runValidationWhenNil = wrapper.runValidationOnNil + } else if aliasTag, isAlias := v.aliases[current.tag]; isAlias { + aliasFirst, aliasLast := v.parseFieldTagsRecursive(aliasTag, fieldName, current.tag, true) + + current.tag = aliasFirst.tag + current.fn = aliasFirst.fn + current.runValidationWhenNil = aliasFirst.runValidationWhenNil + current.hasParam = aliasFirst.hasParam + current.param = aliasFirst.param + current.typeof = aliasFirst.typeof + current.hasAlias = true + + if aliasFirst.next != nil { + nextInChain := current.next + current.next = aliasFirst.next + aliasLast.next = nextInChain + aliasLast.isBlockEnd = false + current = aliasLast + } } else { panic(strings.TrimSpace(fmt.Sprintf(undefinedValidation, current.tag, fieldName))) } diff --git a/vendor/github.com/go-playground/validator/v10/country_codes.go b/vendor/github.com/go-playground/validator/v10/country_codes.go index b5f10d3c113..a9330c80820 100644 --- a/vendor/github.com/go-playground/validator/v10/country_codes.go +++ b/vendor/github.com/go-playground/validator/v10/country_codes.go @@ -258,7 +258,7 @@ var iso3166_2 = map[string]struct{}{ "BD-56": {}, "BD-57": {}, "BD-58": {}, "BD-59": {}, "BD-60": {}, "BD-61": {}, "BD-62": {}, "BD-63": {}, "BD-64": {}, "BD-A": {}, "BD-B": {}, "BD-C": {}, "BD-D": {}, "BD-E": {}, "BD-F": {}, - "BD-G": {}, "BE-BRU": {}, "BE-VAN": {}, "BE-VBR": {}, "BE-VLG": {}, + "BD-G": {}, "BD-H": {}, "BE-BRU": {}, "BE-VAN": {}, "BE-VBR": {}, "BE-VLG": {}, "BE-VLI": {}, "BE-VOV": {}, "BE-VWV": {}, "BE-WAL": {}, "BE-WBR": {}, "BE-WHT": {}, "BE-WLG": {}, "BE-WLX": {}, "BE-WNA": {}, "BF-01": {}, "BF-02": {}, "BF-03": {}, "BF-04": {}, "BF-05": {}, 
"BF-06": {}, @@ -573,7 +573,7 @@ var iso3166_2 = map[string]struct{}{ "ID-KB": {}, "ID-KI": {}, "ID-KU": {}, "ID-KR": {}, "ID-KS": {}, "ID-KT": {}, "ID-LA": {}, "ID-MA": {}, "ID-ML": {}, "ID-MU": {}, "ID-NB": {}, "ID-NT": {}, "ID-NU": {}, "ID-PA": {}, "ID-PB": {}, - "ID-PE": {}, "ID-PP": {}, "ID-PS": {}, "ID-PT": {}, "ID-RI": {}, + "ID-PD": {}, "ID-PE": {}, "ID-PP": {}, "ID-PS": {}, "ID-PT": {}, "ID-RI": {}, "ID-SA": {}, "ID-SB": {}, "ID-SG": {}, "ID-SL": {}, "ID-SM": {}, "ID-SN": {}, "ID-SR": {}, "ID-SS": {}, "ID-ST": {}, "ID-SU": {}, "ID-YO": {}, "IE-C": {}, "IE-CE": {}, "IE-CN": {}, "IE-CO": {}, @@ -587,7 +587,7 @@ var iso3166_2 = map[string]struct{}{ "IN-AS": {}, "IN-BR": {}, "IN-CH": {}, "IN-CT": {}, "IN-DH": {}, "IN-DL": {}, "IN-DN": {}, "IN-GA": {}, "IN-GJ": {}, "IN-HP": {}, "IN-HR": {}, "IN-JH": {}, "IN-JK": {}, "IN-KA": {}, "IN-KL": {}, - "IN-LD": {}, "IN-MH": {}, "IN-ML": {}, "IN-MN": {}, "IN-MP": {}, + "IN-LA": {}, "IN-LD": {}, "IN-MH": {}, "IN-ML": {}, "IN-MN": {}, "IN-MP": {}, "IN-MZ": {}, "IN-NL": {}, "IN-TG": {}, "IN-OR": {}, "IN-PB": {}, "IN-PY": {}, "IN-RJ": {}, "IN-SK": {}, "IN-TN": {}, "IN-TR": {}, "IN-UP": {}, "IN-UT": {}, "IN-WB": {}, "IQ-AN": {}, "IQ-AR": {}, "IQ-BA": {}, @@ -667,7 +667,7 @@ var iso3166_2 = map[string]struct{}{ "KP-08": {}, "KP-09": {}, "KP-10": {}, "KP-13": {}, "KR-11": {}, "KR-26": {}, "KR-27": {}, "KR-28": {}, "KR-29": {}, "KR-30": {}, "KR-31": {}, "KR-41": {}, "KR-42": {}, "KR-43": {}, "KR-44": {}, - "KR-45": {}, "KR-46": {}, "KR-47": {}, "KR-48": {}, "KR-49": {}, + "KR-45": {}, "KR-46": {}, "KR-47": {}, "KR-48": {}, "KR-49": {}, "KR-50": {}, "KW-AH": {}, "KW-FA": {}, "KW-HA": {}, "KW-JA": {}, "KW-KU": {}, "KW-MU": {}, "KZ-10": {}, "KZ-75": {}, "KZ-19": {}, "KZ-11": {}, "KZ-15": {}, "KZ-71": {}, "KZ-23": {}, "KZ-27": {}, "KZ-47": {}, @@ -758,7 +758,7 @@ var iso3166_2 = map[string]struct{}{ "ME-02": {}, "ME-03": {}, "ME-04": {}, "ME-05": {}, "ME-06": {}, "ME-07": {}, "ME-08": {}, "ME-09": {}, "ME-10": {}, "ME-11": {}, 
"ME-12": {}, "ME-13": {}, "ME-14": {}, "ME-15": {}, "ME-16": {}, - "ME-17": {}, "ME-18": {}, "ME-19": {}, "ME-20": {}, "ME-21": {}, "ME-24": {}, + "ME-17": {}, "ME-18": {}, "ME-19": {}, "ME-20": {}, "ME-21": {}, "ME-22": {}, "ME-23": {}, "ME-24": {}, "ME-25": {}, "MG-A": {}, "MG-D": {}, "MG-F": {}, "MG-M": {}, "MG-T": {}, "MG-U": {}, "MH-ALK": {}, "MH-ALL": {}, "MH-ARN": {}, "MH-AUR": {}, "MH-EBO": {}, "MH-ENI": {}, "MH-JAB": {}, "MH-JAL": {}, "MH-KIL": {}, @@ -856,7 +856,7 @@ var iso3166_2 = map[string]struct{}{ "NO-22": {}, "NP-1": {}, "NP-2": {}, "NP-3": {}, "NP-4": {}, "NP-5": {}, "NP-BA": {}, "NP-BH": {}, "NP-DH": {}, "NP-GA": {}, "NP-JA": {}, "NP-KA": {}, "NP-KO": {}, "NP-LU": {}, "NP-MA": {}, - "NP-ME": {}, "NP-NA": {}, "NP-RA": {}, "NP-SA": {}, "NP-SE": {}, + "NP-ME": {}, "NP-NA": {}, "NP-P1": {}, "NP-P2": {}, "NP-P3": {}, "NP-P4": {}, "NP-P5": {}, "NP-P6": {}, "NP-P7": {}, "NP-RA": {}, "NP-SA": {}, "NP-SE": {}, "NR-01": {}, "NR-02": {}, "NR-03": {}, "NR-04": {}, "NR-05": {}, "NR-06": {}, "NR-07": {}, "NR-08": {}, "NR-09": {}, "NR-10": {}, "NR-11": {}, "NR-12": {}, "NR-13": {}, "NR-14": {}, "NZ-AUK": {}, diff --git a/vendor/github.com/go-playground/validator/v10/currency_codes.go b/vendor/github.com/go-playground/validator/v10/currency_codes.go index d0317f89ccb..83b67290db2 100644 --- a/vendor/github.com/go-playground/validator/v10/currency_codes.go +++ b/vendor/github.com/go-playground/validator/v10/currency_codes.go @@ -10,33 +10,33 @@ var iso4217 = map[string]struct{}{ "BIF": {}, "CVE": {}, "KHR": {}, "XAF": {}, "CAD": {}, "KYD": {}, "CLP": {}, "CLF": {}, "CNY": {}, "COP": {}, "COU": {}, "KMF": {}, "CDF": {}, "NZD": {}, "CRC": {}, - "HRK": {}, "CUP": {}, "CUC": {}, "ANG": {}, "CZK": {}, - "DKK": {}, "DJF": {}, "DOP": {}, "EGP": {}, "SVC": {}, - "ERN": {}, "SZL": {}, "ETB": {}, "FKP": {}, "FJD": {}, - "XPF": {}, "GMD": {}, "GEL": {}, "GHS": {}, "GIP": {}, - "GTQ": {}, "GBP": {}, "GNF": {}, "GYD": {}, "HTG": {}, - "HNL": {}, "HKD": {}, "HUF": {}, "ISK": 
{}, "IDR": {}, - "XDR": {}, "IRR": {}, "IQD": {}, "ILS": {}, "JMD": {}, - "JPY": {}, "JOD": {}, "KZT": {}, "KES": {}, "KPW": {}, - "KRW": {}, "KWD": {}, "KGS": {}, "LAK": {}, "LBP": {}, - "LSL": {}, "ZAR": {}, "LRD": {}, "LYD": {}, "CHF": {}, - "MOP": {}, "MKD": {}, "MGA": {}, "MWK": {}, "MYR": {}, - "MVR": {}, "MRU": {}, "MUR": {}, "XUA": {}, "MXN": {}, - "MXV": {}, "MDL": {}, "MNT": {}, "MAD": {}, "MZN": {}, - "MMK": {}, "NAD": {}, "NPR": {}, "NIO": {}, "NGN": {}, - "OMR": {}, "PKR": {}, "PAB": {}, "PGK": {}, "PYG": {}, - "PEN": {}, "PHP": {}, "PLN": {}, "QAR": {}, "RON": {}, - "RUB": {}, "RWF": {}, "SHP": {}, "WST": {}, "STN": {}, - "SAR": {}, "RSD": {}, "SCR": {}, "SLL": {}, "SGD": {}, - "XSU": {}, "SBD": {}, "SOS": {}, "SSP": {}, "LKR": {}, - "SDG": {}, "SRD": {}, "SEK": {}, "CHE": {}, "CHW": {}, - "SYP": {}, "TWD": {}, "TJS": {}, "TZS": {}, "THB": {}, - "TOP": {}, "TTD": {}, "TND": {}, "TRY": {}, "TMT": {}, - "UGX": {}, "UAH": {}, "AED": {}, "USN": {}, "UYU": {}, - "UYI": {}, "UYW": {}, "UZS": {}, "VUV": {}, "VES": {}, - "VND": {}, "YER": {}, "ZMW": {}, "ZWL": {}, "XBA": {}, - "XBB": {}, "XBC": {}, "XBD": {}, "XTS": {}, "XXX": {}, - "XAU": {}, "XPD": {}, "XPT": {}, "XAG": {}, + "CUP": {}, "CZK": {}, "DKK": {}, "DJF": {}, "DOP": {}, + "EGP": {}, "SVC": {}, "ERN": {}, "SZL": {}, "ETB": {}, + "FKP": {}, "FJD": {}, "XPF": {}, "GMD": {}, "GEL": {}, + "GHS": {}, "GIP": {}, "GTQ": {}, "GBP": {}, "GNF": {}, + "GYD": {}, "HTG": {}, "HNL": {}, "HKD": {}, "HUF": {}, + "ISK": {}, "IDR": {}, "XDR": {}, "IRR": {}, "IQD": {}, + "ILS": {}, "JMD": {}, "JPY": {}, "JOD": {}, "KZT": {}, + "KES": {}, "KPW": {}, "KRW": {}, "KWD": {}, "KGS": {}, + "LAK": {}, "LBP": {}, "LSL": {}, "ZAR": {}, "LRD": {}, + "LYD": {}, "CHF": {}, "MOP": {}, "MKD": {}, "MGA": {}, + "MWK": {}, "MYR": {}, "MVR": {}, "MRU": {}, "MUR": {}, + "XUA": {}, "MXN": {}, "MXV": {}, "MDL": {}, "MNT": {}, + "MAD": {}, "MZN": {}, "MMK": {}, "NAD": {}, "NPR": {}, + "NIO": {}, "NGN": {}, "OMR": {}, "PKR": {}, "PAB": {}, 
+ "PGK": {}, "PYG": {}, "PEN": {}, "PHP": {}, "PLN": {}, + "QAR": {}, "RON": {}, "RUB": {}, "RWF": {}, "SHP": {}, + "WST": {}, "STN": {}, "SAR": {}, "RSD": {}, "SCR": {}, + "SLE": {}, "SGD": {}, "XSU": {}, "SBD": {}, "SOS": {}, + "SSP": {}, "LKR": {}, "SDG": {}, "SRD": {}, "SEK": {}, + "CHE": {}, "CHW": {}, "SYP": {}, "TWD": {}, "TJS": {}, + "TZS": {}, "THB": {}, "TOP": {}, "TTD": {}, "TND": {}, + "TRY": {}, "TMT": {}, "UGX": {}, "UAH": {}, "AED": {}, + "USN": {}, "UYU": {}, "UYI": {}, "UYW": {}, "UZS": {}, + "VUV": {}, "VES": {}, "VED": {}, "VND": {}, "YER": {}, + "ZMW": {}, "ZWG": {}, "XBA": {}, "XBB": {}, "XBC": {}, + "XBD": {}, "XCG": {}, "XTS": {}, "XXX": {}, "XAU": {}, + "XPD": {}, "XPT": {}, "XAG": {}, } var iso4217_numeric = map[int]struct{}{ @@ -45,35 +45,35 @@ var iso4217_numeric = map[int]struct{}{ 64: {}, 68: {}, 72: {}, 84: {}, 90: {}, 96: {}, 104: {}, 108: {}, 116: {}, 124: {}, 132: {}, 136: {}, 144: {}, 152: {}, 156: {}, - 170: {}, 174: {}, 188: {}, 191: {}, 192: {}, - 203: {}, 208: {}, 214: {}, 222: {}, 230: {}, - 232: {}, 238: {}, 242: {}, 262: {}, 270: {}, - 292: {}, 320: {}, 324: {}, 328: {}, 332: {}, - 340: {}, 344: {}, 348: {}, 352: {}, 356: {}, - 360: {}, 364: {}, 368: {}, 376: {}, 388: {}, - 392: {}, 398: {}, 400: {}, 404: {}, 408: {}, - 410: {}, 414: {}, 417: {}, 418: {}, 422: {}, - 426: {}, 430: {}, 434: {}, 446: {}, 454: {}, - 458: {}, 462: {}, 480: {}, 484: {}, 496: {}, - 498: {}, 504: {}, 512: {}, 516: {}, 524: {}, - 532: {}, 533: {}, 548: {}, 554: {}, 558: {}, - 566: {}, 578: {}, 586: {}, 590: {}, 598: {}, - 600: {}, 604: {}, 608: {}, 634: {}, 643: {}, - 646: {}, 654: {}, 682: {}, 690: {}, 694: {}, - 702: {}, 704: {}, 706: {}, 710: {}, 728: {}, - 748: {}, 752: {}, 756: {}, 760: {}, 764: {}, - 776: {}, 780: {}, 784: {}, 788: {}, 800: {}, - 807: {}, 818: {}, 826: {}, 834: {}, 840: {}, - 858: {}, 860: {}, 882: {}, 886: {}, 901: {}, - 927: {}, 928: {}, 929: {}, 930: {}, 931: {}, - 932: {}, 933: {}, 934: {}, 936: {}, 938: {}, - 940: {}, 941: 
{}, 943: {}, 944: {}, 946: {}, - 947: {}, 948: {}, 949: {}, 950: {}, 951: {}, - 952: {}, 953: {}, 955: {}, 956: {}, 957: {}, - 958: {}, 959: {}, 960: {}, 961: {}, 962: {}, - 963: {}, 964: {}, 965: {}, 967: {}, 968: {}, - 969: {}, 970: {}, 971: {}, 972: {}, 973: {}, - 975: {}, 976: {}, 977: {}, 978: {}, 979: {}, - 980: {}, 981: {}, 984: {}, 985: {}, 986: {}, - 990: {}, 994: {}, 997: {}, 999: {}, + 170: {}, 174: {}, 188: {}, 192: {}, 203: {}, + 208: {}, 214: {}, 222: {}, 230: {}, 232: {}, + 238: {}, 242: {}, 262: {}, 270: {}, 292: {}, + 320: {}, 324: {}, 328: {}, 332: {}, 340: {}, + 344: {}, 348: {}, 352: {}, 356: {}, 360: {}, + 364: {}, 368: {}, 376: {}, 388: {}, 392: {}, + 398: {}, 400: {}, 404: {}, 408: {}, 410: {}, + 414: {}, 417: {}, 418: {}, 422: {}, 426: {}, + 430: {}, 434: {}, 446: {}, 454: {}, 458: {}, + 462: {}, 480: {}, 484: {}, 496: {}, 498: {}, + 504: {}, 512: {}, 516: {}, 524: {}, 532: {}, + 533: {}, 548: {}, 554: {}, 558: {}, 566: {}, + 578: {}, 586: {}, 590: {}, 598: {}, 600: {}, + 604: {}, 608: {}, 634: {}, 643: {}, 646: {}, + 654: {}, 682: {}, 690: {}, 702: {}, 704: {}, + 706: {}, 710: {}, 728: {}, 748: {}, 752: {}, + 756: {}, 760: {}, 764: {}, 776: {}, 780: {}, + 784: {}, 788: {}, 800: {}, 807: {}, 818: {}, + 826: {}, 834: {}, 840: {}, 858: {}, 860: {}, + 882: {}, 886: {}, 901: {}, 924: {}, 925: {}, + 926: {}, 927: {}, 928: {}, 929: {}, 930: {}, + 933: {}, 934: {}, 936: {}, 938: {}, 940: {}, + 941: {}, 943: {}, 944: {}, 946: {}, 947: {}, + 948: {}, 949: {}, 950: {}, 951: {}, 952: {}, + 953: {}, 955: {}, 956: {}, 957: {}, 958: {}, + 959: {}, 960: {}, 961: {}, 962: {}, 963: {}, + 964: {}, 965: {}, 967: {}, 968: {}, 969: {}, + 970: {}, 971: {}, 972: {}, 973: {}, 975: {}, + 976: {}, 977: {}, 978: {}, 979: {}, 980: {}, + 981: {}, 984: {}, 985: {}, 986: {}, 990: {}, + 994: {}, 997: {}, 999: {}, } diff --git a/vendor/github.com/go-playground/validator/v10/doc.go b/vendor/github.com/go-playground/validator/v10/doc.go index 52918e40939..7b9e77c65fa 100644 
--- a/vendor/github.com/go-playground/validator/v10/doc.go +++ b/vendor/github.com/go-playground/validator/v10/doc.go @@ -201,6 +201,15 @@ only for the nil-values). Usage: omitnil +# Omit Zero + +Allows to skip the validation if the value is a zero value. +For pointers, it checks if the pointer is nil or the underlying value is a zero value. +For slices and maps, it checks if the value is nil or empty. +Otherwise, behaves the same as omitempty. + + Usage: omitzero + # Dive This tells the validator to dive into a slice, array or map and validate that @@ -789,6 +798,12 @@ This validates that a string value contains ASCII alphanumeric characters only Usage: alphanum +# Alphanumeric Space + +This validates that a string value contains ASCII alphanumeric characters and spaces only + + Usage: alphanumspace + # Alpha Unicode This validates that a string value contains unicode alpha characters only @@ -1263,6 +1278,15 @@ This validates that a string value contains a valid Unix Address. Usage: unix_addr +# Unix Domain Socket Exists + +This validates that a Unix domain socket file exists at the specified path. +It checks both filesystem-based sockets and Linux abstract sockets (prefixed with @). +For filesystem sockets, it verifies the path exists and is a socket file. +For abstract sockets on Linux, it checks /proc/net/unix. + + Usage: uds_exists + # Media Access Control Address MAC This validates that a string value contains a valid MAC Address. @@ -1378,13 +1402,20 @@ More information on https://pkg.go.dev/golang.org/x/text/language Usage: bcp47_language_tag -BIC (SWIFT code) +BIC (SWIFT code - 2022 standard) -This validates that a string value is a valid Business Identifier Code (SWIFT code), defined in ISO 9362. -More information on https://www.iso.org/standard/60390.html +This validates that a string value is a valid Business Identifier Code (SWIFT code), defined in ISO 9362:2022. 
+More information on https://www.iso.org/standard/84108.html Usage: bic +BIC (SWIFT code - 2014 standard) + +This validates that a string value is a valid Business Identifier Code (SWIFT code), defined in ISO 9362:2014. +More information on https://www.iso.org/standard/60390.html + + Usage: bic_iso_9362_2014 + # RFC 1035 label This validates that a string value is a valid dns RFC 1035 label, defined in RFC 1035. @@ -1519,7 +1550,7 @@ This package panics when bad input is provided, this is by design, bad code like that should not make it to production. type Test struct { - TestField string `validate:"nonexistantfunction=1"` + TestField string `validate:"nonexistentfunction=1"` } t := &Test{ diff --git a/vendor/github.com/go-playground/validator/v10/regexes.go b/vendor/github.com/go-playground/validator/v10/regexes.go index 0b3615f5e4e..5cf06353481 100644 --- a/vendor/github.com/go-playground/validator/v10/regexes.go +++ b/vendor/github.com/go-playground/validator/v10/regexes.go @@ -9,6 +9,7 @@ const ( alphaRegexString = "^[a-zA-Z]+$" alphaSpaceRegexString = "^[a-zA-Z ]+$" alphaNumericRegexString = "^[a-zA-Z0-9]+$" + alphaNumericSpaceRegexString = "^[a-zA-Z0-9 ]+$" alphaUnicodeRegexString = "^[\\p{L}]+$" alphaUnicodeNumericRegexString = "^[\\p{L}\\p{N}]+$" numericRegexString = "^[-+]?[0-9]+(?:\\.[0-9]+)?$" @@ -20,7 +21,7 @@ const ( hslRegexString = "^hsl\\(\\s*(?:0|[1-9]\\d?|[12]\\d\\d|3[0-5]\\d|360)\\s*,\\s*(?:(?:0|[1-9]\\d?|100)%)\\s*,\\s*(?:(?:0|[1-9]\\d?|100)%)\\s*\\)$" hslaRegexString = "^hsla\\(\\s*(?:0|[1-9]\\d?|[12]\\d\\d|3[0-5]\\d|360)\\s*,\\s*(?:(?:0|[1-9]\\d?|100)%)\\s*,\\s*(?:(?:0|[1-9]\\d?|100)%)\\s*,\\s*(?:(?:0.[1-9]*)|[01])\\s*\\)$" emailRegexString = 
"^(?:(?:(?:(?:[a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+(?:\\.([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+)*)|(?:(?:\\x22)(?:(?:(?:(?:\\x20|\\x09)*(?:\\x0d\\x0a))?(?:\\x20|\\x09)+)?(?:(?:[\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x7f]|\\x21|[\\x23-\\x5b]|[\\x5d-\\x7e]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(?:(?:[\\x01-\\x09\\x0b\\x0c\\x0d-\\x7f]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}]))))*(?:(?:(?:\\x20|\\x09)*(?:\\x0d\\x0a))?(\\x20|\\x09)+)?(?:\\x22))))@(?:(?:(?:[a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(?:(?:[a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])(?:[a-zA-Z]|\\d|-|\\.|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*(?:[a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.)+(?:(?:[a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(?:(?:[a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])(?:[a-zA-Z]|\\d|-|\\.|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*(?:[a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.?$" - e164RegexString = "^\\+[1-9]?[0-9]{7,14}$" + e164RegexString = "^\\+?[1-9]\\d{7,14}$" base32RegexString = "^(?:[A-Z2-7]{8})*(?:[A-Z2-7]{2}={6}|[A-Z2-7]{4}={4}|[A-Z2-7]{5}={3}|[A-Z2-7]{7}=|[A-Z2-7]{8})$" base64RegexString = "^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=|[A-Za-z0-9+\\/]{4})$" base64URLRegexString = "^(?:[A-Za-z0-9-_]{4})*(?:[A-Za-z0-9-_]{2}==|[A-Za-z0-9-_]{3}=|[A-Za-z0-9-_]{4})$" @@ -68,7 +69,8 @@ const ( hTMLRegexString = `<[/]?([a-zA-Z]+).*?>` jWTRegexString = "^[A-Za-z0-9-_]+\\.[A-Za-z0-9-_]+\\.[A-Za-z0-9-_]*$" splitParamsRegexString = `'[^']*'|\S+` - bicRegexString = `^[A-Za-z]{6}[A-Za-z0-9]{2}([A-Za-z0-9]{3})?$` + bic2014RegexString = 
`^[A-Za-z]{6}[A-Za-z0-9]{2}([A-Za-z0-9]{3})?$` + bic2022RegexString = `^[A-Z0-9]{4}[A-Z]{2}[A-Z0-9]{2}(?:[A-Z0-9]{3})?$` semverRegexString = `^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$` // numbered capture groups https://semver.org/ dnsRegexStringRFC1035Label = "^[a-z]([-a-z0-9]*[a-z0-9])?$" cveRegexString = `^CVE-(1999|2\d{3})-(0[^0]\d{2}|0\d[^0]\d{1}|0\d{2}[^0]|[1-9]{1}\d{3,})$` // CVE Format Id https://cve.mitre.org/cve/identifiers/syntaxchange.html @@ -95,6 +97,7 @@ func lazyRegexCompile(str string) func() *regexp.Regexp { var ( alphaRegex = lazyRegexCompile(alphaRegexString) alphaSpaceRegex = lazyRegexCompile(alphaSpaceRegexString) + alphanNumericSpaceRegex = lazyRegexCompile(alphaNumericSpaceRegexString) alphaNumericRegex = lazyRegexCompile(alphaNumericRegexString) alphaUnicodeRegex = lazyRegexCompile(alphaUnicodeRegexString) alphaUnicodeNumericRegex = lazyRegexCompile(alphaUnicodeNumericRegexString) @@ -153,7 +156,8 @@ var ( hTMLRegex = lazyRegexCompile(hTMLRegexString) jWTRegex = lazyRegexCompile(jWTRegexString) splitParamsRegex = lazyRegexCompile(splitParamsRegexString) - bicRegex = lazyRegexCompile(bicRegexString) + bic2014Regex = lazyRegexCompile(bic2014RegexString) + bic2022Regex = lazyRegexCompile(bic2022RegexString) semverRegex = lazyRegexCompile(semverRegexString) dnsRegexRFC1035Label = lazyRegexCompile(dnsRegexStringRFC1035Label) cveRegex = lazyRegexCompile(cveRegexString) diff --git a/vendor/github.com/go-playground/validator/v10/translations/en/en.go b/vendor/github.com/go-playground/validator/v10/translations/en/en.go index 0bf4f7f9a53..0a629e8e3fa 100644 --- a/vendor/github.com/go-playground/validator/v10/translations/en/en.go +++ b/vendor/github.com/go-playground/validator/v10/translations/en/en.go @@ -1074,6 +1074,26 @@ func RegisterDefaultTranslations(v *validator.Validate, trans ut.Translator) (er 
translation: "{0} can only contain alphanumeric characters", override: false, }, + { + tag: "alphaspace", + translation: "{0} can only contain alphabetic and space characters", + override: false, + }, + { + tag: "alphanumspace", + translation: "{0} can only contain alphanumeric and space characters", + override: false, + }, + { + tag: "alphaunicode", + translation: "{0} can only contain unicode alphabetic characters", + override: false, + }, + { + tag: "alphanumunicode", + translation: "{0} can only contain unicode alphanumeric characters", + override: false, + }, { tag: "numeric", translation: "{0} must be a valid numeric value", diff --git a/vendor/github.com/go-playground/validator/v10/util.go b/vendor/github.com/go-playground/validator/v10/util.go index b1fd8cc11a6..b74961efd2e 100644 --- a/vendor/github.com/go-playground/validator/v10/util.go +++ b/vendor/github.com/go-playground/validator/v10/util.go @@ -214,7 +214,9 @@ BEGIN: } // if got here there was more namespace, cannot go any deeper - panic("Invalid field namespace") + // return found=false instead of panicking to handle cases like ValidateMap + // where cross-field validators (required_if, etc.) 
can't navigate non-struct parents + return } // asInt returns the parameter as an int64 diff --git a/vendor/github.com/gofrs/flock/.golangci.yml b/vendor/github.com/gofrs/flock/.golangci.yml index 3ad88a38fcd..bc837b266a9 100644 --- a/vendor/github.com/gofrs/flock/.golangci.yml +++ b/vendor/github.com/gofrs/flock/.golangci.yml @@ -1,5 +1,12 @@ -run: - timeout: 10m +version: "2" + +formatters: + enable: + - gofumpt + - goimports + settings: + gofumpt: + extra-rules: true linters: enable: @@ -18,9 +25,7 @@ linters: - gocritic - godot - godox - - gofumpt - goheader - - goimports - gomoddirectives - goprintffuncname - gosec @@ -31,84 +36,81 @@ linters: - misspell - nolintlint - revive - - stylecheck - - tenv + - staticcheck - testifylint - thelper - unconvert - unparam - usestdlibvars - whitespace - -linters-settings: - misspell: - locale: US - godox: - keywords: - - FIXME - goheader: - template: |- - Copyright 2015 Tim Heckman. All rights reserved. - Copyright 2018-{{ YEAR }} The Gofrs. All rights reserved. - Use of this source code is governed by the BSD 3-Clause - license that can be found in the LICENSE file. 
- gofumpt: - extra-rules: true - gocritic: - enabled-tags: - - diagnostic - - style - - performance - disabled-checks: - - paramTypeCombine # already handle by gofumpt.extra-rules - - whyNoLint # already handle by nonolint - - unnamedResult - - hugeParam - - sloppyReassign - - rangeValCopy - - octalLiteral - - ptrToRefParam - - appendAssign - - ruleguard - - httpNoBody - - exposedSyncMutex - - revive: - rules: - - name: struct-tag - - name: blank-imports - - name: context-as-argument - - name: context-keys-type - - name: dot-imports - - name: error-return - - name: error-strings - - name: error-naming - - name: exported - - name: if-return - - name: increment-decrement - - name: var-naming - - name: var-declaration - - name: package-comments - - name: range - - name: receiver-naming - - name: time-naming - - name: unexported-return - - name: indent-error-flow - - name: errorf - - name: empty-block - - name: superfluous-else - - name: unused-parameter - - name: unreachable-code - - name: redefines-builtin-id + - wsl_v5 + settings: + gocritic: + disabled-checks: + - paramTypeCombine # already handle by gofumpt.extra-rules + - whyNoLint # already handle by nonolint + - unnamedResult + - hugeParam + - sloppyReassign + - rangeValCopy + - octalLiteral + - ptrToRefParam + - appendAssign + - ruleguard + - httpNoBody + - exposedSyncMutex + enabled-tags: + - diagnostic + - style + - performance + godox: + keywords: + - FIXME + goheader: + template: |- + Copyright 2015 Tim Heckman. All rights reserved. + Copyright 2018-{{ YEAR }} The Gofrs. All rights reserved. + Use of this source code is governed by the BSD 3-Clause + license that can be found in the LICENSE file. 
+ gosec: + excludes: + - G115 + misspell: + locale: US + revive: + rules: + - name: struct-tag + - name: blank-imports + - name: context-as-argument + - name: context-keys-type + - name: dot-imports + - name: error-return + - name: error-strings + - name: error-naming + - name: exported + - name: if-return + - name: increment-decrement + - name: var-naming + - name: var-declaration + - name: package-comments + - name: range + - name: receiver-naming + - name: time-naming + - name: unexported-return + - name: indent-error-flow + - name: errorf + - name: empty-block + - name: superfluous-else + - name: unused-parameter + - name: unreachable-code + - name: redefines-builtin-id + exclusions: + presets: + - comments + - common-false-positives + - std-error-handling issues: - exclude-use-default: true max-issues-per-linter: 0 max-same-issues: 0 -output: - show-stats: true - sort-results: true - sort-order: - - linter - - file diff --git a/vendor/github.com/gofrs/flock/LICENSE b/vendor/github.com/gofrs/flock/LICENSE index 7de525bf027..c785e5e4b24 100644 --- a/vendor/github.com/gofrs/flock/LICENSE +++ b/vendor/github.com/gofrs/flock/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2018-2024, The Gofrs +Copyright (c) 2018-2025, The Gofrs Copyright (c) 2015-2020, Tim Heckman All rights reserved. diff --git a/vendor/github.com/gofrs/flock/flock.go b/vendor/github.com/gofrs/flock/flock.go index ff942b228a6..4cb0746a716 100644 --- a/vendor/github.com/gofrs/flock/flock.go +++ b/vendor/github.com/gofrs/flock/flock.go @@ -1,5 +1,5 @@ // Copyright 2015 Tim Heckman. All rights reserved. -// Copyright 2018-2024 The Gofrs. All rights reserved. +// Copyright 2018-2025 The Gofrs. All rights reserved. // Use of this source code is governed by the BSD 3-Clause // license that can be found in the LICENSE file. @@ -62,6 +62,7 @@ type Flock struct { func New(path string, opts ...Option) *Flock { // create it if it doesn't exist, and open the file read-only. 
flags := os.O_CREATE + switch runtime.GOOS { case "aix", "solaris", "illumos": // AIX cannot preform write-lock (i.e. exclusive) on a read-only file. @@ -124,6 +125,22 @@ func (f *Flock) RLocked() bool { return f.r } +// Stat returns the FileInfo structure describing the lock file. +// If the lock file does not exist or cannot be accessed, an error is returned. +// +// This can be used to check the modification time of the lock file, +// which is useful for detecting stale locks. +func (f *Flock) Stat() (fs.FileInfo, error) { + f.m.RLock() + defer f.m.RUnlock() + + if f.fh != nil { + return f.fh.Stat() + } + + return os.Stat(f.path) +} + func (f *Flock) String() string { return f.path } @@ -158,7 +175,6 @@ func tryCtx(ctx context.Context, fn func() (bool, error), retryDelay time.Durati case <-ctx.Done(): return false, ctx.Err() case <-time.After(retryDelay): - // try again } } } diff --git a/vendor/github.com/gofrs/flock/flock_others.go b/vendor/github.com/gofrs/flock/flock_others.go index 18b14f1bd7a..92d0f7e95a9 100644 --- a/vendor/github.com/gofrs/flock/flock_others.go +++ b/vendor/github.com/gofrs/flock/flock_others.go @@ -1,3 +1,8 @@ +// Copyright 2015 Tim Heckman. All rights reserved. +// Copyright 2018-2025 The Gofrs. All rights reserved. +// Use of this source code is governed by the BSD 3-Clause +// license that can be found in the LICENSE file. + //go:build (!unix && !windows) || plan9 package flock diff --git a/vendor/github.com/gofrs/flock/flock_unix.go b/vendor/github.com/gofrs/flock/flock_unix.go index cf8919c7add..77de7a8837a 100644 --- a/vendor/github.com/gofrs/flock/flock_unix.go +++ b/vendor/github.com/gofrs/flock/flock_unix.go @@ -1,5 +1,5 @@ // Copyright 2015 Tim Heckman. All rights reserved. -// Copyright 2018-2024 The Gofrs. All rights reserved. +// Copyright 2018-2025 The Gofrs. All rights reserved. // Use of this source code is governed by the BSD 3-Clause // license that can be found in the LICENSE file. 
@@ -155,6 +155,7 @@ func (f *Flock) try(locked *bool, flag int) (bool, error) { } var retried bool + retry: err := unix.Flock(int(f.fh.Fd()), flag|unix.LOCK_NB) diff --git a/vendor/github.com/gofrs/flock/flock_unix_fcntl.go b/vendor/github.com/gofrs/flock/flock_unix_fcntl.go index ea007b47d9a..05c2f88c65b 100644 --- a/vendor/github.com/gofrs/flock/flock_unix_fcntl.go +++ b/vendor/github.com/gofrs/flock/flock_unix_fcntl.go @@ -1,5 +1,5 @@ // Copyright 2015 Tim Heckman. All rights reserved. -// Copyright 2018-2024 The Gofrs. All rights reserved. +// Copyright 2018-2025 The Gofrs. All rights reserved. // Use of this source code is governed by the BSD 3-Clause // license that can be found in the LICENSE file. diff --git a/vendor/github.com/gofrs/flock/flock_windows.go b/vendor/github.com/gofrs/flock/flock_windows.go index dfd31e15f50..aa144f156e9 100644 --- a/vendor/github.com/gofrs/flock/flock_windows.go +++ b/vendor/github.com/gofrs/flock/flock_windows.go @@ -1,5 +1,5 @@ // Copyright 2015 Tim Heckman. All rights reserved. -// Copyright 2018-2024 The Gofrs. All rights reserved. +// Copyright 2018-2025 The Gofrs. All rights reserved. // Use of this source code is governed by the BSD 3-Clause // license that can be found in the LICENSE file. @@ -23,6 +23,8 @@ const winLockfileSharedLock = 0x00000000 // ErrorLockViolation is the error code returned from the Windows syscall when a lock would block, // and you ask to fail immediately. +// +//nolint:errname // It should be renamed to `ErrLockViolation`. const ErrorLockViolation windows.Errno = 0x21 // 33 // Lock is a blocking call to try and take an exclusive file lock. 
diff --git a/vendor/github.com/gomodule/redigo/redis/conn.go b/vendor/github.com/gomodule/redigo/redis/conn.go index 753644b1b0b..dc445a0669b 100644 --- a/vendor/github.com/gomodule/redigo/redis/conn.go +++ b/vendor/github.com/gomodule/redigo/redis/conn.go @@ -332,7 +332,10 @@ func DialURLContext(ctx context.Context, rawurl string, options ...DialOption) ( return nil, err } - if u.Scheme != "redis" && u.Scheme != "rediss" { + switch u.Scheme { + case "redis", "rediss", "valkey", "valkeys": + // valid scheme + default: return nil, fmt.Errorf("invalid redis URL scheme: %s", u.Scheme) } @@ -386,7 +389,7 @@ func DialURLContext(ctx context.Context, rawurl string, options ...DialOption) ( return nil, fmt.Errorf("invalid database: %s", u.Path[1:]) } - options = append(options, DialUseTLS(u.Scheme == "rediss")) + options = append(options, DialUseTLS(u.Scheme == "rediss" || u.Scheme == "valkeys")) return DialContext(ctx, "tcp", address, options...) } diff --git a/vendor/github.com/gomodule/redigo/redis/scan.go b/vendor/github.com/gomodule/redigo/redis/scan.go index 82121011b4d..c1fb800d1a9 100644 --- a/vendor/github.com/gomodule/redigo/redis/scan.go +++ b/vendor/github.com/gomodule/redigo/redis/scan.go @@ -477,7 +477,7 @@ var errScanStructValue = errors.New("redigo.ScanStruct: value must be non-nil po // ScanStruct uses exported field names to match values in the response. Use // 'redis' field tag to override the name: // -// Field int `redis:"myName"` +// Field int `redis:"myName"` // // Fields with the tag redis:"-" are ignored. 
// @@ -513,9 +513,9 @@ func ScanStruct(src []interface{}, dest interface{}) error { continue } - name, ok := src[i].([]byte) + name, ok := convertToBulk(src[i]) if !ok { - return fmt.Errorf("redigo.ScanStruct: key %d not a bulk string value", i) + return fmt.Errorf("redigo.ScanStruct: key %d not a bulk string value got type: %T", i, src[i]) } fs := ss.fieldSpec(name) @@ -530,6 +530,19 @@ func ScanStruct(src []interface{}, dest interface{}) error { return nil } +// convertToBulk converts src to a []byte if src is a string or bulk string +// and returns true. Otherwise nil and false is returned. +func convertToBulk(src interface{}) ([]byte, bool) { + switch v := src.(type) { + case []byte: + return v, true + case string: + return []byte(v), true + default: + return nil, false + } +} + var ( errScanSliceValue = errors.New("redigo.ScanSlice: dest must be non-nil pointer to a struct") ) diff --git a/vendor/github.com/google/renameio/v2/.golangci.yml b/vendor/github.com/google/renameio/v2/.golangci.yml index abfb6ca0a0f..579e22b543d 100644 --- a/vendor/github.com/google/renameio/v2/.golangci.yml +++ b/vendor/github.com/google/renameio/v2/.golangci.yml @@ -1,5 +1,24 @@ +version: "2" linters: disable: - - errcheck + - errcheck + exclusions: + generated: lax + presets: + - comments + - common-false-positives + - legacy + - std-error-handling + paths: + - third_party$ + - builtin$ + - examples$ +formatters: enable: - - gofmt + - gofmt + exclusions: + generated: lax + paths: + - third_party$ + - builtin$ + - examples$ diff --git a/vendor/github.com/google/renameio/v2/README.md b/vendor/github.com/google/renameio/v2/README.md index 703884c2600..8caed1c7aa7 100644 --- a/vendor/github.com/google/renameio/v2/README.md +++ b/vendor/github.com/google/renameio/v2/README.md @@ -1,6 +1,6 @@ [![Build Status](https://github.com/google/renameio/workflows/Test/badge.svg)](https://github.com/google/renameio/actions?query=workflow%3ATest) 
-[![PkgGoDev](https://pkg.go.dev/badge/github.com/google/renameio)](https://pkg.go.dev/github.com/google/renameio) -[![Go Report Card](https://goreportcard.com/badge/github.com/google/renameio)](https://goreportcard.com/report/github.com/google/renameio) +[![PkgGoDev](https://pkg.go.dev/badge/github.com/google/renameio/v2)](https://pkg.go.dev/github.com/google/renameio/v2) +[![Go Report Card](https://goreportcard.com/badge/github.com/google/renameio/v2)](https://goreportcard.com/report/github.com/google/renameio/v2) The `renameio` Go package provides a way to atomically create or replace a file or symbolic link. diff --git a/vendor/github.com/google/renameio/v2/option.go b/vendor/github.com/google/renameio/v2/option.go index f825f6cf9fe..a86906f4cdd 100644 --- a/vendor/github.com/google/renameio/v2/option.go +++ b/vendor/github.com/google/renameio/v2/option.go @@ -77,3 +77,12 @@ func WithExistingPermissions() Option { c.attemptPermCopy = true }) } + +// WithReplaceOnClose causes PendingFile.Close() to actually call +// CloseAtomicallyReplace(). This means PendingFile implements io.Closer while +// maintaining atomicity per default. +func WithReplaceOnClose() Option { + return optionFunc(func(c *config) { + c.renameOnClose = true + }) +} diff --git a/vendor/github.com/google/renameio/v2/tempfile.go b/vendor/github.com/google/renameio/v2/tempfile.go index edc3e9871cc..98114e539cc 100644 --- a/vendor/github.com/google/renameio/v2/tempfile.go +++ b/vendor/github.com/google/renameio/v2/tempfile.go @@ -114,9 +114,10 @@ func tempDir(dir, dest string) string { type PendingFile struct { *os.File - path string - done bool - closed bool + path string + done bool + closed bool + replaceOnClose bool } // Cleanup is a no-op if CloseAtomicallyReplace succeeded, and otherwise closes @@ -131,7 +132,7 @@ func (t *PendingFile) Cleanup() error { // reporting, there is nothing the caller can recover here. 
var closeErr error if !t.closed { - closeErr = t.Close() + closeErr = t.File.Close() } if err := os.Remove(t.Name()); err != nil { return err @@ -159,7 +160,7 @@ func (t *PendingFile) CloseAtomicallyReplace() error { return err } t.closed = true - if err := t.Close(); err != nil { + if err := t.File.Close(); err != nil { return err } if err := os.Rename(t.Name(), t.path); err != nil { @@ -169,6 +170,15 @@ func (t *PendingFile) CloseAtomicallyReplace() error { return nil } +// Close closes the file. By default it just calls Close() on the underlying file. For PendingFiles created with +// WithReplaceOnClose it calls CloseAtomicallyReplace() instead. +func (t *PendingFile) Close() error { + if t.replaceOnClose { + return t.CloseAtomicallyReplace() + } + return t.File.Close() +} + // TempFile creates a temporary file destined to atomically creating or // replacing the destination file at path. // @@ -189,6 +199,7 @@ type config struct { attemptPermCopy bool ignoreUmask bool chmod *os.FileMode + renameOnClose bool } // NewPendingFile creates a temporary file destined to atomically creating or @@ -244,7 +255,7 @@ func NewPendingFile(path string, opts ...Option) (*PendingFile, error) { } } - return &PendingFile{File: f, path: cfg.path}, nil + return &PendingFile{File: f, path: cfg.path, replaceOnClose: cfg.renameOnClose}, nil } // Symlink wraps os.Symlink, replacing an existing symlink with the same name diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md index 244ee19c4bf..af2ef639536 100644 --- a/vendor/github.com/klauspost/compress/README.md +++ b/vendor/github.com/klauspost/compress/README.md @@ -27,6 +27,16 @@ Use the links above for more information on each. 
# changelog +* Oct 20, 2025 - [1.18.1](https://github.com/klauspost/compress/releases/tag/v1.18.1) + * zstd: Add simple zstd EncodeTo/DecodeTo functions https://github.com/klauspost/compress/pull/1079 + * zstd: Fix incorrect buffer size in dictionary encodes https://github.com/klauspost/compress/pull/1059 + * s2: check for cap, not len of buffer in EncodeBetter/Best by @vdarulis in https://github.com/klauspost/compress/pull/1080 + * zlib: Avoiding extra allocation in zlib.reader.Reset by @travelpolicy in https://github.com/klauspost/compress/pull/1086 + * gzhttp: remove redundant err check in zstdReader by @ryanfowler in https://github.com/klauspost/compress/pull/1090 + * flate: Faster load+store https://github.com/klauspost/compress/pull/1104 + * flate: Simplify matchlen https://github.com/klauspost/compress/pull/1101 + * flate: Use exact sizes for huffman tables https://github.com/klauspost/compress/pull/1103 + * Feb 19th, 2025 - [1.18.0](https://github.com/klauspost/compress/releases/tag/v1.18.0) * Add unsafe little endian loaders https://github.com/klauspost/compress/pull/1036 * fix: check `r.err != nil` but return a nil value error `err` by @alingse in https://github.com/klauspost/compress/pull/1028 @@ -36,6 +46,9 @@ Use the links above for more information on each. * flate: Fix matchlen L5+L6 https://github.com/klauspost/compress/pull/1049 * flate: Cleanup & reduce casts https://github.com/klauspost/compress/pull/1050 +
+ See changes to v1.17.x + * Oct 11th, 2024 - [1.17.11](https://github.com/klauspost/compress/releases/tag/v1.17.11) * zstd: Fix extra CRC written with multiple Close calls https://github.com/klauspost/compress/pull/1017 * s2: Don't use stack for index tables https://github.com/klauspost/compress/pull/1014 @@ -102,7 +115,8 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp * s2: Do 2 overlapping match checks https://github.com/klauspost/compress/pull/839 * flate: Add amd64 assembly matchlen https://github.com/klauspost/compress/pull/837 * gzip: Copy bufio.Reader on Reset by @thatguystone in https://github.com/klauspost/compress/pull/860 - + +
See changes to v1.16.x @@ -669,3 +683,4 @@ Here are other packages of good quality and pure Go (no cgo wrappers or autoconv # license This code is licensed under the same conditions as the original Go code. See LICENSE file. + diff --git a/vendor/github.com/klauspost/compress/flate/deflate.go b/vendor/github.com/klauspost/compress/flate/deflate.go index af53fb860cc..57d17eeab9e 100644 --- a/vendor/github.com/klauspost/compress/flate/deflate.go +++ b/vendor/github.com/klauspost/compress/flate/deflate.go @@ -6,11 +6,12 @@ package flate import ( - "encoding/binary" "errors" "fmt" "io" "math" + + "github.com/klauspost/compress/internal/le" ) const ( @@ -234,12 +235,9 @@ func (d *compressor) fillWindow(b []byte) { // Calculate 256 hashes at the time (more L1 cache hits) loops := (n + 256 - minMatchLength) / 256 - for j := 0; j < loops; j++ { + for j := range loops { startindex := j * 256 - end := startindex + 256 + minMatchLength - 1 - if end > n { - end = n - } + end := min(startindex+256+minMatchLength-1, n) tocheck := d.window[startindex:end] dstSize := len(tocheck) - minMatchLength + 1 @@ -269,18 +267,12 @@ func (d *compressor) fillWindow(b []byte) { // We only look at chainCount possibilities before giving up. // pos = s.index, prevHead = s.chainHead-s.hashOffset, prevLength=minMatchLength-1, lookahead func (d *compressor) findMatch(pos int, prevHead int, lookahead int) (length, offset int, ok bool) { - minMatchLook := maxMatchLength - if lookahead < minMatchLook { - minMatchLook = lookahead - } + minMatchLook := min(lookahead, maxMatchLength) win := d.window[0 : pos+minMatchLook] // We quit when we get a match that's at least nice long - nice := len(win) - pos - if d.nice < nice { - nice = d.nice - } + nice := min(d.nice, len(win)-pos) // If we've got a match that's good enough, only look in 1/4 the chain. 
tries := d.chain @@ -288,10 +280,7 @@ func (d *compressor) findMatch(pos int, prevHead int, lookahead int) (length, of wEnd := win[pos+length] wPos := win[pos:] - minIndex := pos - windowSize - if minIndex < 0 { - minIndex = 0 - } + minIndex := max(pos-windowSize, 0) offset = 0 if d.chain < 100 { @@ -374,7 +363,7 @@ func (d *compressor) writeStoredBlock(buf []byte) error { // of the supplied slice. // The caller must ensure that len(b) >= 4. func hash4(b []byte) uint32 { - return hash4u(binary.LittleEndian.Uint32(b), hashBits) + return hash4u(le.Load32(b, 0), hashBits) } // hash4 returns the hash of u to fit in a hash table with h bits. @@ -389,7 +378,7 @@ func bulkHash4(b []byte, dst []uint32) { if len(b) < 4 { return } - hb := binary.LittleEndian.Uint32(b) + hb := le.Load32(b, 0) dst[0] = hash4u(hb, hashBits) end := len(b) - 4 + 1 @@ -432,7 +421,9 @@ func (d *compressor) deflateLazy() { d.h = newHuffmanEncoder(maxFlateBlockTokens) } var tmp [256]uint16 - for _, v := range d.window[s.index:d.windowEnd] { + toIndex := d.window[s.index:d.windowEnd] + toIndex = toIndex[:min(len(toIndex), maxFlateBlockTokens)] + for _, v := range toIndex { tmp[v]++ } d.h.generate(tmp[:], 15) @@ -480,10 +471,7 @@ func (d *compressor) deflateLazy() { prevOffset := s.offset s.length = minMatchLength - 1 s.offset = 0 - minIndex := s.index - windowSize - if minIndex < 0 { - minIndex = 0 - } + minIndex := max(s.index-windowSize, 0) if s.chainHead-s.hashOffset >= minIndex && lookahead > prevLength && prevLength < d.lazy { if newLength, newOffset, ok := d.findMatch(s.index, s.chainHead-s.hashOffset, lookahead); ok { @@ -503,10 +491,7 @@ func (d *compressor) deflateLazy() { if prevLength < maxMatchLength-checkOff { prevIndex := s.index - 1 if prevIndex+prevLength < s.maxInsertIndex { - end := lookahead - if lookahead > maxMatchLength+checkOff { - end = maxMatchLength + checkOff - } + end := min(lookahead, maxMatchLength+checkOff) end += prevIndex // Hash at match end. 
@@ -603,15 +588,9 @@ func (d *compressor) deflateLazy() { // table. newIndex := s.index + prevLength - 1 // Calculate missing hashes - end := newIndex - if end > s.maxInsertIndex { - end = s.maxInsertIndex - } + end := min(newIndex, s.maxInsertIndex) end += minMatchLength - 1 - startindex := s.index + 1 - if startindex > s.maxInsertIndex { - startindex = s.maxInsertIndex - } + startindex := min(s.index+1, s.maxInsertIndex) tocheck := d.window[startindex:end] dstSize := len(tocheck) - minMatchLength + 1 if dstSize > 0 { diff --git a/vendor/github.com/klauspost/compress/flate/dict_decoder.go b/vendor/github.com/klauspost/compress/flate/dict_decoder.go index bb36351a5af..cb855abc4ba 100644 --- a/vendor/github.com/klauspost/compress/flate/dict_decoder.go +++ b/vendor/github.com/klauspost/compress/flate/dict_decoder.go @@ -104,10 +104,7 @@ func (dd *dictDecoder) writeCopy(dist, length int) int { dstBase := dd.wrPos dstPos := dstBase srcPos := dstPos - dist - endPos := dstPos + length - if endPos > len(dd.hist) { - endPos = len(dd.hist) - } + endPos := min(dstPos+length, len(dd.hist)) // Copy non-overlapping section after destination position. 
// diff --git a/vendor/github.com/klauspost/compress/flate/fast_encoder.go b/vendor/github.com/klauspost/compress/flate/fast_encoder.go index 0e8b1630c04..791c9dcbfd9 100644 --- a/vendor/github.com/klauspost/compress/flate/fast_encoder.go +++ b/vendor/github.com/klauspost/compress/flate/fast_encoder.go @@ -7,7 +7,6 @@ package flate import ( "fmt" - "math/bits" "github.com/klauspost/compress/internal/le" ) @@ -151,29 +150,9 @@ func (e *fastGen) matchlen(s, t int, src []byte) int32 { panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")")) } } - s1 := min(s+maxMatchLength-4, len(src)) - left := s1 - s - n := int32(0) - for left >= 8 { - diff := le.Load64(src, s) ^ le.Load64(src, t) - if diff != 0 { - return n + int32(bits.TrailingZeros64(diff)>>3) - } - s += 8 - t += 8 - n += 8 - left -= 8 - } - - a := src[s:s1] + a := src[s:min(s+maxMatchLength-4, len(src))] b := src[t:] - for i := range a { - if a[i] != b[i] { - break - } - n++ - } - return n + return int32(matchLen(a, b)) } // matchlenLong will return the match length between offsets and t in src. @@ -193,29 +172,7 @@ func (e *fastGen) matchlenLong(s, t int, src []byte) int32 { panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")")) } } - // Extend the match to be as long as possible. - left := len(src) - s - n := int32(0) - for left >= 8 { - diff := le.Load64(src, s) ^ le.Load64(src, t) - if diff != 0 { - return n + int32(bits.TrailingZeros64(diff)>>3) - } - s += 8 - t += 8 - n += 8 - left -= 8 - } - - a := src[s:] - b := src[t:] - for i := range a { - if a[i] != b[i] { - break - } - n++ - } - return n + return int32(matchLen(src[s:], src[t:])) } // Reset the encoding table. 
diff --git a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go index afdc8c053a0..7151140ccd7 100644 --- a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go +++ b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go @@ -211,7 +211,9 @@ func (w *huffmanBitWriter) flush() { n++ } w.bits = 0 - w.write(w.bytes[:n]) + if n > 0 { + w.write(w.bytes[:n]) + } w.nbytes = 0 } @@ -303,10 +305,7 @@ func (w *huffmanBitWriter) generateCodegen(numLiterals int, numOffsets int, litE w.codegenFreq[size]++ count-- for count >= 3 { - n := 6 - if n > count { - n = count - } + n := min(6, count) codegen[outIndex] = 16 outIndex++ codegen[outIndex] = uint8(n - 3) @@ -316,10 +315,7 @@ func (w *huffmanBitWriter) generateCodegen(numLiterals int, numOffsets int, litE } } else { for count >= 11 { - n := 138 - if n > count { - n = count - } + n := min(138, count) codegen[outIndex] = 18 outIndex++ codegen[outIndex] = uint8(n - 11) @@ -438,8 +434,8 @@ func (w *huffmanBitWriter) writeOutBits() { w.nbits -= 48 n := w.nbytes - // We over-write, but faster... - le.Store64(w.bytes[n:], bits) + // We overwrite, but faster... 
+ le.Store64(w.bytes[:], n, bits) n += 6 if n >= bufferFlushSize { @@ -472,7 +468,7 @@ func (w *huffmanBitWriter) writeDynamicHeader(numLiterals int, numOffsets int, n w.writeBits(int32(numOffsets-1), 5) w.writeBits(int32(numCodegens-4), 4) - for i := 0; i < numCodegens; i++ { + for i := range numCodegens { value := uint(w.codegenEncoding.codes[codegenOrder[i]].len()) w.writeBits(int32(value), 3) } @@ -650,7 +646,7 @@ func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []b w.lastHeader = 0 } - numLiterals, numOffsets := w.indexTokens(tokens, !sync) + numLiterals, numOffsets := w.indexTokens(tokens, true) extraBits := 0 ssize, storable := w.storedSize(input) @@ -785,7 +781,7 @@ func (w *huffmanBitWriter) fillTokens() { // literalFreq and offsetFreq, and generates literalEncoding // and offsetEncoding. // The number of literal and offset tokens is returned. -func (w *huffmanBitWriter) indexTokens(t *tokens, filled bool) (numLiterals, numOffsets int) { +func (w *huffmanBitWriter) indexTokens(t *tokens, alwaysEOB bool) (numLiterals, numOffsets int) { //copy(w.literalFreq[:], t.litHist[:]) *(*[256]uint16)(w.literalFreq[:]) = t.litHist //copy(w.literalFreq[256:], t.extraHist[:]) @@ -795,9 +791,10 @@ func (w *huffmanBitWriter) indexTokens(t *tokens, filled bool) (numLiterals, num if t.n == 0 { return } - if filled { - return maxNumLit, maxNumDist + if alwaysEOB { + w.literalFreq[endBlockMarker] = 1 } + // get the number of literals numLiterals = len(w.literalFreq) for w.literalFreq[numLiterals-1] == 0 { @@ -855,8 +852,7 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) bits |= c.code64() << (nbits & 63) nbits += c.len() if nbits >= 48 { - le.Store64(w.bytes[nbytes:], bits) - //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits + le.Store64(w.bytes[:], nbytes, bits) bits >>= 48 nbits -= 48 nbytes += 6 @@ -883,8 +879,7 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) bits |= 
c.code64() << (nbits & 63) nbits += c.len() if nbits >= 48 { - le.Store64(w.bytes[nbytes:], bits) - //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits + le.Store64(w.bytes[:], nbytes, bits) bits >>= 48 nbits -= 48 nbytes += 6 @@ -906,8 +901,7 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) bits |= uint64(extraLength) << (nbits & 63) nbits += extraLengthBits if nbits >= 48 { - le.Store64(w.bytes[nbytes:], bits) - //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits + le.Store64(w.bytes[:], nbytes, bits) bits >>= 48 nbits -= 48 nbytes += 6 @@ -932,8 +926,7 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) bits |= c.code64() << (nbits & 63) nbits += c.len() if nbits >= 48 { - le.Store64(w.bytes[nbytes:], bits) - //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits + le.Store64(w.bytes[:], nbytes, bits) bits >>= 48 nbits -= 48 nbytes += 6 @@ -954,8 +947,7 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) bits |= uint64((offset-(offsetComb>>8))&matchOffsetOnlyMask) << (nbits & 63) nbits += uint8(offsetComb) if nbits >= 48 { - le.Store64(w.bytes[nbytes:], bits) - //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits + le.Store64(w.bytes[:], nbytes, bits) bits >>= 48 nbits -= 48 nbytes += 6 @@ -1108,7 +1100,7 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) { // We must have at least 48 bits free. if nbits >= 8 { n := nbits >> 3 - le.Store64(w.bytes[nbytes:], bits) + le.Store64(w.bytes[:], nbytes, bits) bits >>= (n * 8) & 63 nbits -= n * 8 nbytes += n @@ -1137,8 +1129,7 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) { // Remaining... 
for _, t := range input { if nbits >= 48 { - le.Store64(w.bytes[nbytes:], bits) - //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits + le.Store64(w.bytes[:], nbytes, bits) bits >>= 48 nbits -= 48 nbytes += 6 diff --git a/vendor/github.com/klauspost/compress/flate/huffman_code.go b/vendor/github.com/klauspost/compress/flate/huffman_code.go index be7b58b473f..5f901bd0fe8 100644 --- a/vendor/github.com/klauspost/compress/flate/huffman_code.go +++ b/vendor/github.com/klauspost/compress/flate/huffman_code.go @@ -91,7 +91,7 @@ func generateFixedLiteralEncoding() *huffmanEncoder { h := newHuffmanEncoder(literalCount) codes := h.codes var ch uint16 - for ch = 0; ch < literalCount; ch++ { + for ch = range uint16(literalCount) { var bits uint16 var size uint8 switch { diff --git a/vendor/github.com/klauspost/compress/flate/inflate.go b/vendor/github.com/klauspost/compress/flate/inflate.go index 0d7b437f1c6..6e90126db03 100644 --- a/vendor/github.com/klauspost/compress/flate/inflate.go +++ b/vendor/github.com/klauspost/compress/flate/inflate.go @@ -485,7 +485,7 @@ func (f *decompressor) readHuffman() error { f.nb -= 5 + 5 + 4 // (HCLEN+4)*3 bits: code lengths in the magic codeOrder order. - for i := 0; i < nclen; i++ { + for i := range nclen { for f.nb < 3 { if err := f.moreBits(); err != nil { return err @@ -776,7 +776,7 @@ func fixedHuffmanDecoderInit() { fixedOnce.Do(func() { // These come from the RFC section 3.2.6. 
var bits [288]int - for i := 0; i < 144; i++ { + for i := range 144 { bits[i] = 8 } for i := 144; i < 256; i++ { diff --git a/vendor/github.com/klauspost/compress/flate/level5.go b/vendor/github.com/klauspost/compress/flate/level5.go index 6e5c21502f7..a22ad7d1255 100644 --- a/vendor/github.com/klauspost/compress/flate/level5.go +++ b/vendor/github.com/klauspost/compress/flate/level5.go @@ -677,10 +677,7 @@ func (e *fastEncL5Window) matchlen(s, t int32, src []byte) int32 { panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")")) } } - s1 := int(s) + maxMatchLength - 4 - if s1 > len(src) { - s1 = len(src) - } + s1 := min(int(s)+maxMatchLength-4, len(src)) // Extend the match to be as long as possible. return int32(matchLen(src[s:s1], src[t:])) diff --git a/vendor/github.com/klauspost/compress/flate/stateless.go b/vendor/github.com/klauspost/compress/flate/stateless.go index 13b9b100dbc..455ed3e2b56 100644 --- a/vendor/github.com/klauspost/compress/flate/stateless.go +++ b/vendor/github.com/klauspost/compress/flate/stateless.go @@ -56,18 +56,24 @@ func NewStatelessWriter(dst io.Writer) io.WriteCloser { // bitWriterPool contains bit writers that can be reused. var bitWriterPool = sync.Pool{ - New: func() interface{} { + New: func() any { return newHuffmanBitWriter(nil) }, } +// tokensPool contains tokens struct objects that can be reused +var tokensPool = sync.Pool{ + New: func() any { + return &tokens{} + }, +} + // StatelessDeflate allows compressing directly to a Writer without retaining state. // When returning everything will be flushed. // Up to 8KB of an optional dictionary can be given which is presumed to precede the block. // Longer dictionaries will be truncated and will still produce valid output. // Sending nil dictionary is perfectly fine. 
func StatelessDeflate(out io.Writer, in []byte, eof bool, dict []byte) error { - var dst tokens bw := bitWriterPool.Get().(*huffmanBitWriter) bw.reset(out) defer func() { @@ -91,6 +97,12 @@ func StatelessDeflate(out io.Writer, in []byte, eof bool, dict []byte) error { // For subsequent loops, keep shallow dict reference to avoid alloc+copy. var inDict []byte + dst := tokensPool.Get().(*tokens) + dst.Reset() + defer func() { + tokensPool.Put(dst) + }() + for len(in) > 0 { todo := in if len(inDict) > 0 { @@ -113,9 +125,9 @@ func StatelessDeflate(out io.Writer, in []byte, eof bool, dict []byte) error { } // Compress if len(inDict) == 0 { - statelessEnc(&dst, todo, int16(len(dict))) + statelessEnc(dst, todo, int16(len(dict))) } else { - statelessEnc(&dst, inDict[:maxStatelessDict+len(todo)], maxStatelessDict) + statelessEnc(dst, inDict[:maxStatelessDict+len(todo)], maxStatelessDict) } isEof := eof && len(in) == 0 @@ -129,7 +141,7 @@ func StatelessDeflate(out io.Writer, in []byte, eof bool, dict []byte) error { // If we removed less than 1/16th, huffman compress the block. 
bw.writeBlockHuff(isEof, uncompressed, len(in) == 0) } else { - bw.writeBlockDynamic(&dst, isEof, uncompressed, len(in) == 0) + bw.writeBlockDynamic(dst, isEof, uncompressed, len(in) == 0) } if len(in) > 0 { // Retain a dict if we have more @@ -184,7 +196,7 @@ func statelessEnc(dst *tokens, src []byte, startAt int16) { // Index until startAt if startAt > 0 { cv := load3232(src, 0) - for i := int16(0); i < startAt; i++ { + for i := range startAt { table[hashSL(cv)] = tableEntry{offset: i} cv = (cv >> 8) | (uint32(src[i+4]) << 24) } diff --git a/vendor/github.com/klauspost/compress/fse/bitwriter.go b/vendor/github.com/klauspost/compress/fse/bitwriter.go index e82fa3bb7b6..d58b3fe4237 100644 --- a/vendor/github.com/klauspost/compress/fse/bitwriter.go +++ b/vendor/github.com/klauspost/compress/fse/bitwriter.go @@ -143,7 +143,7 @@ func (b *bitWriter) flush32() { // flushAlign will flush remaining full bytes and align to next byte boundary. func (b *bitWriter) flushAlign() { nbBytes := (b.nBits + 7) >> 3 - for i := uint8(0); i < nbBytes; i++ { + for i := range nbBytes { b.out = append(b.out, byte(b.bitContainer>>(i*8))) } b.nBits = 0 diff --git a/vendor/github.com/klauspost/compress/fse/compress.go b/vendor/github.com/klauspost/compress/fse/compress.go index 074018d8f94..8c8baa4fc2c 100644 --- a/vendor/github.com/klauspost/compress/fse/compress.go +++ b/vendor/github.com/klauspost/compress/fse/compress.go @@ -396,7 +396,7 @@ func (s *Scratch) buildCTable() error { if v > largeLimit { s.zeroBits = true } - for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ { + for range v { tableSymbol[position] = symbol position = (position + step) & tableMask for position > highThreshold { diff --git a/vendor/github.com/klauspost/compress/huff0/bitwriter.go b/vendor/github.com/klauspost/compress/huff0/bitwriter.go index 0ebc9aaac76..41db94cded0 100644 --- a/vendor/github.com/klauspost/compress/huff0/bitwriter.go +++ 
b/vendor/github.com/klauspost/compress/huff0/bitwriter.go @@ -85,7 +85,7 @@ func (b *bitWriter) flush32() { // flushAlign will flush remaining full bytes and align to next byte boundary. func (b *bitWriter) flushAlign() { nbBytes := (b.nBits + 7) >> 3 - for i := uint8(0); i < nbBytes; i++ { + for i := range nbBytes { b.out = append(b.out, byte(b.bitContainer>>(i*8))) } b.nBits = 0 diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go index 84aa3d12f00..a97cf1b5d35 100644 --- a/vendor/github.com/klauspost/compress/huff0/compress.go +++ b/vendor/github.com/klauspost/compress/huff0/compress.go @@ -276,7 +276,7 @@ func (s *Scratch) compress4X(src []byte) ([]byte, error) { offsetIdx := len(s.Out) s.Out = append(s.Out, sixZeros[:]...) - for i := 0; i < 4; i++ { + for i := range 4 { toDo := src if len(toDo) > segmentSize { toDo = toDo[:segmentSize] @@ -312,7 +312,7 @@ func (s *Scratch) compress4Xp(src []byte) ([]byte, error) { segmentSize := (len(src) + 3) / 4 var wg sync.WaitGroup wg.Add(4) - for i := 0; i < 4; i++ { + for i := range 4 { toDo := src if len(toDo) > segmentSize { toDo = toDo[:segmentSize] @@ -326,7 +326,7 @@ func (s *Scratch) compress4Xp(src []byte) ([]byte, error) { }(i) } wg.Wait() - for i := 0; i < 4; i++ { + for i := range 4 { o := s.tmpOut[i] if len(o) > math.MaxUint16 { // We cannot store the size in the jump table diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go index 0f56b02d747..7d0efa8818a 100644 --- a/vendor/github.com/klauspost/compress/huff0/decompress.go +++ b/vendor/github.com/klauspost/compress/huff0/decompress.go @@ -626,7 +626,7 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { var br [4]bitReaderBytes start := 6 - for i := 0; i < 3; i++ { + for i := range 3 { length := int(src[i*2]) | (int(src[i*2+1]) << 8) if start+length >= len(src) { return nil, 
errors.New("truncated input (or invalid offset)") @@ -798,10 +798,7 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { remainBytes := dstEvery - (decoded / 4) for i := range br { offset := dstEvery * i - endsAt := offset + remainBytes - if endsAt > len(out) { - endsAt = len(out) - } + endsAt := min(offset+remainBytes, len(out)) br := &br[i] bitsLeft := br.remaining() for bitsLeft > 0 { @@ -864,7 +861,7 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { var br [4]bitReaderBytes start := 6 - for i := 0; i < 3; i++ { + for i := range 3 { length := int(src[i*2]) | (int(src[i*2+1]) << 8) if start+length >= len(src) { return nil, errors.New("truncated input (or invalid offset)") @@ -1035,10 +1032,7 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { remainBytes := dstEvery - (decoded / 4) for i := range br { offset := dstEvery * i - endsAt := offset + remainBytes - if endsAt > len(out) { - endsAt = len(out) - } + endsAt := min(offset+remainBytes, len(out)) br := &br[i] bitsLeft := br.remaining() for bitsLeft > 0 { diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go index ba7e8e6b027..99ddd4af97c 100644 --- a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go +++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go @@ -58,7 +58,7 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { var br [4]bitReaderShifted // Decode "jump table" start := 6 - for i := 0; i < 3; i++ { + for i := range 3 { length := int(src[i*2]) | (int(src[i*2+1]) << 8) if start+length >= len(src) { return nil, errors.New("truncated input (or invalid offset)") @@ -109,10 +109,7 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { remainBytes := dstEvery - (decoded / 4) for i := range br { offset := dstEvery * i - 
endsAt := offset + remainBytes - if endsAt > len(out) { - endsAt = len(out) - } + endsAt := min(offset+remainBytes, len(out)) br := &br[i] bitsLeft := br.remaining() for bitsLeft > 0 { diff --git a/vendor/github.com/klauspost/compress/huff0/huff0.go b/vendor/github.com/klauspost/compress/huff0/huff0.go index 77ecd68e0a7..67d9e05b6cc 100644 --- a/vendor/github.com/klauspost/compress/huff0/huff0.go +++ b/vendor/github.com/klauspost/compress/huff0/huff0.go @@ -201,7 +201,7 @@ func (c cTable) write(s *Scratch) error { for i := range hist[:16] { hist[i] = 0 } - for n := uint8(0); n < maxSymbolValue; n++ { + for n := range maxSymbolValue { v := bitsToWeight[c[n].nBits] & 15 huffWeight[n] = v hist[v]++ @@ -271,7 +271,7 @@ func (c cTable) estTableSize(s *Scratch) (sz int, err error) { for i := range hist[:16] { hist[i] = 0 } - for n := uint8(0); n < maxSymbolValue; n++ { + for n := range maxSymbolValue { v := bitsToWeight[c[n].nBits] & 15 huffWeight[n] = v hist[v]++ diff --git a/vendor/github.com/klauspost/compress/internal/le/unsafe_disabled.go b/vendor/github.com/klauspost/compress/internal/le/unsafe_disabled.go index 0cfb5c0e278..4f2a0d8c58d 100644 --- a/vendor/github.com/klauspost/compress/internal/le/unsafe_disabled.go +++ b/vendor/github.com/klauspost/compress/internal/le/unsafe_disabled.go @@ -37,6 +37,6 @@ func Store32(b []byte, v uint32) { } // Store64 will store v at b. -func Store64(b []byte, v uint64) { - binary.LittleEndian.PutUint64(b, v) +func Store64[I Indexer](b []byte, i I, v uint64) { + binary.LittleEndian.PutUint64(b[i:], v) } diff --git a/vendor/github.com/klauspost/compress/internal/le/unsafe_enabled.go b/vendor/github.com/klauspost/compress/internal/le/unsafe_enabled.go index ada45cd909e..218a38bc4a5 100644 --- a/vendor/github.com/klauspost/compress/internal/le/unsafe_enabled.go +++ b/vendor/github.com/klauspost/compress/internal/le/unsafe_enabled.go @@ -38,18 +38,15 @@ func Load64[I Indexer](b []byte, i I) uint64 { // Store16 will store v at b. 
func Store16(b []byte, v uint16) { - //binary.LittleEndian.PutUint16(b, v) *(*uint16)(unsafe.Pointer(unsafe.SliceData(b))) = v } // Store32 will store v at b. func Store32(b []byte, v uint32) { - //binary.LittleEndian.PutUint32(b, v) *(*uint32)(unsafe.Pointer(unsafe.SliceData(b))) = v } -// Store64 will store v at b. -func Store64(b []byte, v uint64) { - //binary.LittleEndian.PutUint64(b, v) - *(*uint64)(unsafe.Pointer(unsafe.SliceData(b))) = v +// Store64 will store v at b[i:]. +func Store64[I Indexer](b []byte, i I, v uint64) { + *(*uint64)(unsafe.Add(unsafe.Pointer(unsafe.SliceData(b)), i)) = v } diff --git a/vendor/github.com/klauspost/compress/internal/snapref/decode.go b/vendor/github.com/klauspost/compress/internal/snapref/decode.go index 40796a49d65..a2c82fcd226 100644 --- a/vendor/github.com/klauspost/compress/internal/snapref/decode.go +++ b/vendor/github.com/klauspost/compress/internal/snapref/decode.go @@ -209,7 +209,7 @@ func (r *Reader) fill() error { if !r.readFull(r.buf[:len(magicBody)], false) { return r.err } - for i := 0; i < len(magicBody); i++ { + for i := range len(magicBody) { if r.buf[i] != magicBody[i] { r.err = ErrCorrupt return r.err diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode.go b/vendor/github.com/klauspost/compress/internal/snapref/encode.go index 13c6040a5de..860a994167a 100644 --- a/vendor/github.com/klauspost/compress/internal/snapref/encode.go +++ b/vendor/github.com/klauspost/compress/internal/snapref/encode.go @@ -20,8 +20,10 @@ import ( func Encode(dst, src []byte) []byte { if n := MaxEncodedLen(len(src)); n < 0 { panic(ErrTooLarge) - } else if len(dst) < n { + } else if cap(dst) < n { dst = make([]byte, n) + } else { + dst = dst[:n] } // The block starts with the varint-encoded length of the decompressed bytes. 
diff --git a/vendor/github.com/klauspost/compress/s2/README.md b/vendor/github.com/klauspost/compress/s2/README.md index 1d9220cbf56..b0bf59fbbd2 100644 --- a/vendor/github.com/klauspost/compress/s2/README.md +++ b/vendor/github.com/klauspost/compress/s2/README.md @@ -1,3 +1,17 @@ +# MinLZ + +I have taken the experiences from this library and created a backwards compatible compression package called MinLZ. + +That package will seamlessly decode S2 content, making the transition from this package fairly trivial. + +There are many improvements to pretty much all aspects of S2 since we have "broken free" of the Snappy format specification. +You can read a writeup on [Design and Improvements over S2](https://gist.github.com/klauspost/a25b66198cdbdf7b5b224f670c894ed5). + +The only aspect not covered is custom dictionary encoding. While I do intend to fix errors in this package, +I do not expect to make significant improvements, since I consider MinLZ a better basis for going forward. + +See https://github.com/minio/minlz for all details. + # S2 Compression S2 is an extension of [Snappy](https://github.com/google/snappy). diff --git a/vendor/github.com/klauspost/compress/s2/encode.go b/vendor/github.com/klauspost/compress/s2/encode.go index 20b802270a7..330e755716f 100644 --- a/vendor/github.com/klauspost/compress/s2/encode.go +++ b/vendor/github.com/klauspost/compress/s2/encode.go @@ -117,8 +117,10 @@ func EstimateBlockSize(src []byte) (d int) { func EncodeBetter(dst, src []byte) []byte { if n := MaxEncodedLen(len(src)); n < 0 { panic(ErrTooLarge) - } else if len(dst) < n { + } else if cap(dst) < n { dst = make([]byte, n) + } else { + dst = dst[:n] } // The block starts with the varint-encoded length of the decompressed bytes. 
@@ -159,8 +161,10 @@ func EncodeBetter(dst, src []byte) []byte { func EncodeBest(dst, src []byte) []byte { if n := MaxEncodedLen(len(src)); n < 0 { panic(ErrTooLarge) - } else if len(dst) < n { + } else if cap(dst) < n { dst = make([]byte, n) + } else { + dst = dst[:n] } // The block starts with the varint-encoded length of the decompressed bytes. diff --git a/vendor/github.com/klauspost/compress/s2/encode_all.go b/vendor/github.com/klauspost/compress/s2/encode_all.go index a473b645291..9d12c44f38a 100644 --- a/vendor/github.com/klauspost/compress/s2/encode_all.go +++ b/vendor/github.com/klauspost/compress/s2/encode_all.go @@ -903,10 +903,7 @@ func encodeBlockDictGo(dst, src []byte, dict *Dict) (d int) { // sLimit is when to stop looking for offset/length copies. The inputMargin // lets us use a fast path for emitLiteral in the main loop, while we are // looking for copies. - sLimit := len(src) - inputMargin - if sLimit > MaxDictSrcOffset-maxAhead { - sLimit = MaxDictSrcOffset - maxAhead - } + sLimit := min(len(src)-inputMargin, MaxDictSrcOffset-maxAhead) // Bail if we can't compress to at least this. 
dstLimit := len(src) - len(src)>>5 - 5 diff --git a/vendor/github.com/klauspost/compress/s2/encode_best.go b/vendor/github.com/klauspost/compress/s2/encode_best.go index 47bac74234b..c857c5c2839 100644 --- a/vendor/github.com/klauspost/compress/s2/encode_best.go +++ b/vendor/github.com/klauspost/compress/s2/encode_best.go @@ -42,10 +42,7 @@ func encodeBlockBest(dst, src []byte, dict *Dict) (d int) { if len(src) < minNonLiteralBlockSize { return 0 } - sLimitDict := len(src) - inputMargin - if sLimitDict > MaxDictSrcOffset-inputMargin { - sLimitDict = MaxDictSrcOffset - inputMargin - } + sLimitDict := min(len(src)-inputMargin, MaxDictSrcOffset-inputMargin) var lTable [maxLTableSize]uint64 var sTable [maxSTableSize]uint64 diff --git a/vendor/github.com/klauspost/compress/s2/encode_better.go b/vendor/github.com/klauspost/compress/s2/encode_better.go index 90ebf89c204..1e30fb73176 100644 --- a/vendor/github.com/klauspost/compress/s2/encode_better.go +++ b/vendor/github.com/klauspost/compress/s2/encode_better.go @@ -914,10 +914,7 @@ func encodeBlockBetterDict(dst, src []byte, dict *Dict) (d int) { debug = false ) - sLimit := len(src) - inputMargin - if sLimit > MaxDictSrcOffset-maxAhead { - sLimit = MaxDictSrcOffset - maxAhead - } + sLimit := min(len(src)-inputMargin, MaxDictSrcOffset-maxAhead) if len(src) < minNonLiteralBlockSize { return 0 } diff --git a/vendor/github.com/klauspost/compress/s2/index.go b/vendor/github.com/klauspost/compress/s2/index.go index 4229957b96e..fb7db25315a 100644 --- a/vendor/github.com/klauspost/compress/s2/index.go +++ b/vendor/github.com/klauspost/compress/s2/index.go @@ -72,7 +72,7 @@ func (i *Index) add(compressedOffset, uncompressedOffset int64) error { return fmt.Errorf("internal error: Earlier uncompressed received (%d > %d)", latest.uncompressedOffset, uncompressedOffset) } if latest.compressedOffset > compressedOffset { - return fmt.Errorf("internal error: Earlier compressed received (%d > %d)", latest.uncompressedOffset, 
uncompressedOffset) + return fmt.Errorf("internal error: Earlier compressed received (%d > %d)", latest.compressedOffset, compressedOffset) } if latest.uncompressedOffset+minIndexDist > uncompressedOffset { // Only add entry if distance is large enough. diff --git a/vendor/github.com/klauspost/compress/s2/reader.go b/vendor/github.com/klauspost/compress/s2/reader.go index 8372d752f9a..4d01c4190cc 100644 --- a/vendor/github.com/klauspost/compress/s2/reader.go +++ b/vendor/github.com/klauspost/compress/s2/reader.go @@ -1046,7 +1046,7 @@ func (r *Reader) ReadByte() (byte, error) { return c, nil } var tmp [1]byte - for i := 0; i < 10; i++ { + for range 10 { n, err := r.Read(tmp[:]) if err != nil { return 0, err diff --git a/vendor/github.com/klauspost/compress/s2/writer.go b/vendor/github.com/klauspost/compress/s2/writer.go index fd15078f7df..09f1cff3a96 100644 --- a/vendor/github.com/klauspost/compress/s2/writer.go +++ b/vendor/github.com/klauspost/compress/s2/writer.go @@ -47,7 +47,7 @@ func NewWriter(w io.Writer, opts ...WriterOption) *Writer { w2.obufLen = obufHeaderLen + MaxEncodedLen(w2.blockSize) w2.paramsOK = true w2.ibuf = make([]byte, 0, w2.blockSize) - w2.buffers.New = func() interface{} { + w2.buffers.New = func() any { return make([]byte, w2.obufLen) } w2.Reset(w) diff --git a/vendor/github.com/klauspost/compress/zstd/bitwriter.go b/vendor/github.com/klauspost/compress/zstd/bitwriter.go index 1952f175b0d..b22b297e62a 100644 --- a/vendor/github.com/klauspost/compress/zstd/bitwriter.go +++ b/vendor/github.com/klauspost/compress/zstd/bitwriter.go @@ -88,7 +88,7 @@ func (b *bitWriter) flush32() { // flushAlign will flush remaining full bytes and align to next byte boundary. 
func (b *bitWriter) flushAlign() { nbBytes := (b.nBits + 7) >> 3 - for i := uint8(0); i < nbBytes; i++ { + for i := range nbBytes { b.out = append(b.out, byte(b.bitContainer>>(i*8))) } b.nBits = 0 diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go index 0dd742fd2a6..2329e996f86 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockdec.go +++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go @@ -54,11 +54,11 @@ const ( ) var ( - huffDecoderPool = sync.Pool{New: func() interface{} { + huffDecoderPool = sync.Pool{New: func() any { return &huff0.Scratch{} }} - fseDecoderPool = sync.Pool{New: func() interface{} { + fseDecoderPool = sync.Pool{New: func() any { return &fseDecoder{} }} ) @@ -553,7 +553,7 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) { if compMode&3 != 0 { return errors.New("corrupt block: reserved bits not zero") } - for i := uint(0); i < 3; i++ { + for i := range uint(3) { mode := seqCompMode((compMode >> (6 - i*2)) & 3) if debugDecoder { println("Table", tableIndex(i), "is", mode) diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go index ea2a19376c1..30df5513d56 100644 --- a/vendor/github.com/klauspost/compress/zstd/decoder.go +++ b/vendor/github.com/klauspost/compress/zstd/decoder.go @@ -373,11 +373,9 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { if cap(dst) == 0 && !d.o.limitToCap { // Allocate len(input) * 2 by default if nothing is provided // and we didn't get frame content size. - size := len(input) * 2 - // Cap to 1 MB. - if size > 1<<20 { - size = 1 << 20 - } + size := min( + // Cap to 1 MB. 
+ len(input)*2, 1<<20) if uint64(size) > d.o.maxDecodedSize { size = int(d.o.maxDecodedSize) } diff --git a/vendor/github.com/klauspost/compress/zstd/dict.go b/vendor/github.com/klauspost/compress/zstd/dict.go index b7b83164bc7..2ffbfdf379e 100644 --- a/vendor/github.com/klauspost/compress/zstd/dict.go +++ b/vendor/github.com/klauspost/compress/zstd/dict.go @@ -194,17 +194,17 @@ func BuildDict(o BuildDictOptions) ([]byte, error) { hist := o.History contents := o.Contents debug := o.DebugOut != nil - println := func(args ...interface{}) { + println := func(args ...any) { if o.DebugOut != nil { fmt.Fprintln(o.DebugOut, args...) } } - printf := func(s string, args ...interface{}) { + printf := func(s string, args ...any) { if o.DebugOut != nil { fmt.Fprintf(o.DebugOut, s, args...) } } - print := func(args ...interface{}) { + print := func(args ...any) { if o.DebugOut != nil { fmt.Fprint(o.DebugOut, args...) } @@ -424,16 +424,10 @@ func BuildDict(o BuildDictOptions) ([]byte, error) { } // Literal table - avgSize := litTotal - if avgSize > huff0.BlockSizeMax/2 { - avgSize = huff0.BlockSizeMax / 2 - } + avgSize := min(litTotal, huff0.BlockSizeMax/2) huffBuff := make([]byte, 0, avgSize) // Target size - div := litTotal / avgSize - if div < 1 { - div = 1 - } + div := max(litTotal/avgSize, 1) if debug { println("Huffman weights:") } @@ -454,7 +448,7 @@ func BuildDict(o BuildDictOptions) ([]byte, error) { huffBuff = append(huffBuff, 255) } scratch := &huff0.Scratch{TableLog: 11} - for tries := 0; tries < 255; tries++ { + for tries := range 255 { scratch = &huff0.Scratch{TableLog: 11} _, _, err = huff0.Compress1X(huffBuff, scratch) if err == nil { @@ -471,7 +465,7 @@ func BuildDict(o BuildDictOptions) ([]byte, error) { // Bail out.... Just generate something huffBuff = append(huffBuff, bytes.Repeat([]byte{255}, 10000)...) 
- for i := 0; i < 128; i++ { + for i := range 128 { huffBuff = append(huffBuff, byte(i)) } continue diff --git a/vendor/github.com/klauspost/compress/zstd/enc_base.go b/vendor/github.com/klauspost/compress/zstd/enc_base.go index 7d250c67f59..c1192ec38f4 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_base.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_base.go @@ -8,7 +8,7 @@ import ( ) const ( - dictShardBits = 6 + dictShardBits = 7 ) type fastBase struct { @@ -41,11 +41,9 @@ func (e *fastBase) AppendCRC(dst []byte) []byte { // or a window size small enough to contain the input size, if > 0. func (e *fastBase) WindowSize(size int64) int32 { if size > 0 && size < int64(e.maxMatchOff) { - b := int32(1) << uint(bits.Len(uint(size))) - // Keep minimum window. - if b < 1024 { - b = 1024 - } + b := max( + // Keep minimum window. + int32(1)< tMin && s > nextEmit && src[offset-1] == src[s-1] && l < maxMatchLength { s-- offset-- @@ -382,10 +377,7 @@ encodeLoop: nextEmit = s // Index skipped... - end := s - if s > sLimit+4 { - end = sLimit + 4 - } + end := min(s, sLimit+4) off := index0 + e.cur for index0 < end { cv0 := load6432(src, index0) @@ -444,10 +436,7 @@ encodeLoop: nextEmit = s // Index old s + 1 -> s - 1 or sLimit - end := s - if s > sLimit-4 { - end = sLimit - 4 - } + end := min(s, sLimit-4) off := index0 + e.cur for index0 < end { diff --git a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go index 84a79fde767..85dcd28c32e 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_better.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go @@ -190,10 +190,7 @@ encodeLoop: // and have to do special offset treatment. 
startLimit := nextEmit + 1 - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { repIndex-- start-- @@ -252,10 +249,7 @@ encodeLoop: // and have to do special offset treatment. startLimit := nextEmit + 1 - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { repIndex-- start-- @@ -480,10 +474,7 @@ encodeLoop: l := matched // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { s-- t-- @@ -719,10 +710,7 @@ encodeLoop: // and have to do special offset treatment. startLimit := nextEmit + 1 - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { repIndex-- start-- @@ -783,10 +771,7 @@ encodeLoop: // and have to do special offset treatment. 
startLimit := nextEmit + 1 - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { repIndex-- start-- @@ -1005,10 +990,7 @@ encodeLoop: l := matched // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { s-- t-- diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go index d36be7bd8c2..cf8cad00dcf 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go @@ -13,7 +13,7 @@ const ( dFastLongLen = 8 // Bytes used for table hash dLongTableShardCnt = 1 << (dFastLongTableBits - dictShardBits) // Number of shards in the table - dLongTableShardSize = dFastLongTableSize / tableShardCnt // Size of an individual shard + dLongTableShardSize = dFastLongTableSize / dLongTableShardCnt // Size of an individual shard dFastShortTableBits = tableBits // Bits used in the short match table dFastShortTableSize = 1 << dFastShortTableBits // Size of the table @@ -149,10 +149,7 @@ encodeLoop: // and have to do special offset treatment. startLimit := nextEmit + 1 - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { repIndex-- start-- @@ -266,10 +263,7 @@ encodeLoop: l := e.matchlen(s+4, t+4, src) + 4 // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { s-- t-- @@ -462,10 +456,7 @@ encodeLoop: // and have to do special offset treatment. 
startLimit := nextEmit + 1 - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] { repIndex-- start-- @@ -576,10 +567,7 @@ encodeLoop: l := int32(matchLen(src[s+4:], src[t+4:])) + 4 // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for t > tMin && s > nextEmit && src[t-1] == src[s-1] { s-- t-- @@ -809,10 +797,7 @@ encodeLoop: // and have to do special offset treatment. startLimit := nextEmit + 1 - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { repIndex-- start-- @@ -927,10 +912,7 @@ encodeLoop: l := e.matchlen(s+4, t+4, src) + 4 // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { s-- t-- diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go index f45a3da7dae..9180a3a5820 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_fast.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_fast.go @@ -143,10 +143,7 @@ encodeLoop: // and have to do special offset treatment. 
startLimit := nextEmit + 1 - sMin := s - e.maxMatchOff - if sMin < 0 { - sMin = 0 - } + sMin := max(s-e.maxMatchOff, 0) for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch { repIndex-- start-- @@ -223,10 +220,7 @@ encodeLoop: l := e.matchlen(s+4, t+4, src) + 4 // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { s-- t-- @@ -387,10 +381,7 @@ encodeLoop: // and have to do special offset treatment. startLimit := nextEmit + 1 - sMin := s - e.maxMatchOff - if sMin < 0 { - sMin = 0 - } + sMin := max(s-e.maxMatchOff, 0) for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] { repIndex-- start-- @@ -469,10 +460,7 @@ encodeLoop: l := e.matchlen(s+4, t+4, src) + 4 // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for t > tMin && s > nextEmit && src[t-1] == src[s-1] { s-- t-- @@ -655,10 +643,7 @@ encodeLoop: // and have to do special offset treatment. 
startLimit := nextEmit + 1 - sMin := s - e.maxMatchOff - if sMin < 0 { - sMin = 0 - } + sMin := max(s-e.maxMatchOff, 0) for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch { repIndex-- start-- @@ -735,10 +720,7 @@ encodeLoop: l := e.matchlen(s+4, t+4, src) + 4 // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { s-- t-- diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go index e47af66e7c9..d88f067e5c2 100644 --- a/vendor/github.com/klauspost/compress/zstd/framedec.go +++ b/vendor/github.com/klauspost/compress/zstd/framedec.go @@ -238,10 +238,7 @@ func (d *frameDec) reset(br byteBuffer) error { if d.WindowSize == 0 && d.SingleSegment { // We may not need window in this case. - d.WindowSize = d.FrameContentSize - if d.WindowSize < MinWindowSize { - d.WindowSize = MinWindowSize - } + d.WindowSize = max(d.FrameContentSize, MinWindowSize) if d.WindowSize > d.o.maxDecodedSize { if debugDecoder { printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize) diff --git a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go index ab26326a8ff..3a0f4e7fbe6 100644 --- a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go +++ b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go @@ -149,7 +149,7 @@ func (s *fseEncoder) buildCTable() error { if v > largeLimit { s.zeroBits = true } - for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ { + for range v { tableSymbol[position] = symbol position = (position + step) & tableMask for position > highThreshold { diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go index 9a7de82f9ef..0bfb0e43c7b 100644 --- 
a/vendor/github.com/klauspost/compress/zstd/seqdec.go +++ b/vendor/github.com/klauspost/compress/zstd/seqdec.go @@ -231,10 +231,7 @@ func (s *sequenceDecs) decodeSync(hist []byte) error { llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize] llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state out := s.out - maxBlockSize := maxCompressedBlockSize - if s.windowSize < maxBlockSize { - maxBlockSize = s.windowSize - } + maxBlockSize := min(s.windowSize, maxCompressedBlockSize) if debugDecoder { println("decodeSync: decoding", seqs, "sequences", br.remain(), "bits remain on stream") diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go index c59f17e07ad..1f8c3cec28c 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go @@ -79,10 +79,7 @@ func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { br := s.br - maxBlockSize := maxCompressedBlockSize - if s.windowSize < maxBlockSize { - maxBlockSize = s.windowSize - } + maxBlockSize := min(s.windowSize, maxCompressedBlockSize) ctx := decodeSyncAsmContext{ llTable: s.litLengths.fse.dt[:maxTablesize], @@ -237,10 +234,7 @@ func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmC func (s *sequenceDecs) decode(seqs []seqVals) error { br := s.br - maxBlockSize := maxCompressedBlockSize - if s.windowSize < maxBlockSize { - maxBlockSize = s.windowSize - } + maxBlockSize := min(s.windowSize, maxCompressedBlockSize) ctx := decodeAsmContext{ llTable: s.litLengths.fse.dt[:maxTablesize], diff --git a/vendor/github.com/klauspost/compress/zstd/simple_go124.go b/vendor/github.com/klauspost/compress/zstd/simple_go124.go new file mode 100644 index 00000000000..2efc0497bf9 --- /dev/null +++ 
b/vendor/github.com/klauspost/compress/zstd/simple_go124.go @@ -0,0 +1,56 @@ +// Copyright 2025+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. + +//go:build go1.24 + +package zstd + +import ( + "errors" + "runtime" + "sync" + "weak" +) + +var weakMu sync.Mutex +var simpleEnc weak.Pointer[Encoder] +var simpleDec weak.Pointer[Decoder] + +// EncodeTo appends the encoded data from src to dst. +func EncodeTo(dst []byte, src []byte) []byte { + weakMu.Lock() + enc := simpleEnc.Value() + if enc == nil { + var err error + enc, err = NewWriter(nil, WithEncoderConcurrency(runtime.NumCPU()), WithWindowSize(1<<20), WithLowerEncoderMem(true), WithZeroFrames(true)) + if err != nil { + panic("failed to create simple encoder: " + err.Error()) + } + simpleEnc = weak.Make(enc) + } + weakMu.Unlock() + + return enc.EncodeAll(src, dst) +} + +// DecodeTo appends the decoded data from src to dst. +// The maximum decoded size is 1GiB, +// not including what may already be in dst. 
+func DecodeTo(dst []byte, src []byte) ([]byte, error) { + weakMu.Lock() + dec := simpleDec.Value() + if dec == nil { + var err error + dec, err = NewReader(nil, WithDecoderConcurrency(runtime.NumCPU()), WithDecoderLowmem(true), WithDecoderMaxMemory(1<<30)) + if err != nil { + weakMu.Unlock() + return nil, errors.New("failed to create simple decoder: " + err.Error()) + } + runtime.SetFinalizer(dec, func(d *Decoder) { + d.Close() + }) + simpleDec = weak.Make(dec) + } + weakMu.Unlock() + return dec.DecodeAll(src, dst) +} diff --git a/vendor/github.com/klauspost/compress/zstd/snappy.go b/vendor/github.com/klauspost/compress/zstd/snappy.go index a17381b8f89..336c2889304 100644 --- a/vendor/github.com/klauspost/compress/zstd/snappy.go +++ b/vendor/github.com/klauspost/compress/zstd/snappy.go @@ -257,7 +257,7 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) { if !r.readFull(r.buf[:len(snappyMagicBody)], false) { return written, r.err } - for i := 0; i < len(snappyMagicBody); i++ { + for i := range len(snappyMagicBody) { if r.buf[i] != snappyMagicBody[i] { println("r.buf[i] != snappyMagicBody[i]", r.buf[i], snappyMagicBody[i], i) r.err = ErrSnappyCorrupt diff --git a/vendor/github.com/klauspost/compress/zstd/zip.go b/vendor/github.com/klauspost/compress/zstd/zip.go index 29c15c8c4ef..3198d718926 100644 --- a/vendor/github.com/klauspost/compress/zstd/zip.go +++ b/vendor/github.com/klauspost/compress/zstd/zip.go @@ -19,7 +19,7 @@ const ZipMethodWinZip = 93 const ZipMethodPKWare = 20 // zipReaderPool is the default reader pool. 
-var zipReaderPool = sync.Pool{New: func() interface{} { +var zipReaderPool = sync.Pool{New: func() any { z, err := NewReader(nil, WithDecoderLowmem(true), WithDecoderMaxWindow(128<<20), WithDecoderConcurrency(1)) if err != nil { panic(err) diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go index 6252b46ae6f..1a869710d2c 100644 --- a/vendor/github.com/klauspost/compress/zstd/zstd.go +++ b/vendor/github.com/klauspost/compress/zstd/zstd.go @@ -98,13 +98,13 @@ var ( ErrDecoderNilInput = errors.New("nil input provided as reader") ) -func println(a ...interface{}) { +func println(a ...any) { if debug || debugDecoder || debugEncoder { log.Println(a...) } } -func printf(format string, a ...interface{}) { +func printf(format string, a ...any) { if debug || debugDecoder || debugEncoder { log.Printf(format, a...) } diff --git a/vendor/github.com/klauspost/crc32/.gitignore b/vendor/github.com/klauspost/crc32/.gitignore new file mode 100644 index 00000000000..daf913b1b34 --- /dev/null +++ b/vendor/github.com/klauspost/crc32/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/klauspost/crc32/LICENSE b/vendor/github.com/klauspost/crc32/LICENSE new file mode 100644 index 00000000000..74487567632 --- /dev/null +++ b/vendor/github.com/klauspost/crc32/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/klauspost/crc32/README.md b/vendor/github.com/klauspost/crc32/README.md new file mode 100644 index 00000000000..febeba5ebeb --- /dev/null +++ b/vendor/github.com/klauspost/crc32/README.md @@ -0,0 +1,42 @@ +# 2025 revival + +For IEEE checksums AVX512 can be used to speed up CRC32 checksums by approximately 2x. + +Castagnoli checksums (CRC32C) can also be computed with AVX512, +but the performance gain is not significant enough for the downsides of using it at this point. + +# crc32 + +This package is a drop-in replacement for the standard library `hash/crc32` package, +that features AVX 512 optimizations on x64 platforms, for a 2x speedup for IEEE CRC32 checksums. + +# usage + +Install using `go get github.com/klauspost/crc32`. 
This library is based on Go 1.24 + +Replace `import "hash/crc32"` with `import "github.com/klauspost/crc32"` and you are good to go. + +# changes +* 2025: Revived and updated to Go 1.24, with AVX 512 optimizations. + +# performance + +AVX512 is enabled above 1KB input size. This rather high limit is because AVX512 may be slower to ramp up than +the regular SSE4 implementation for smaller inputs. This is not reflected in the benchmarks below. + +| Benchmark | Old MB/s | New MB/s | Speedup | +|-----------------------------------------------|----------|----------|---------| +| BenchmarkCRC32/poly=IEEE/size=512/align=0-32 | 17996.39 | 17969.94 | 1.00x | +| BenchmarkCRC32/poly=IEEE/size=512/align=1-32 | 18021.48 | 17945.55 | 1.00x | +| BenchmarkCRC32/poly=IEEE/size=1kB/align=0-32 | 19921.70 | 45613.77 | 2.29x | +| BenchmarkCRC32/poly=IEEE/size=1kB/align=1-32 | 19946.60 | 46819.09 | 2.35x | +| BenchmarkCRC32/poly=IEEE/size=4kB/align=0-32 | 21538.65 | 48600.93 | 2.26x | +| BenchmarkCRC32/poly=IEEE/size=4kB/align=1-32 | 21449.20 | 48477.84 | 2.26x | +| BenchmarkCRC32/poly=IEEE/size=32kB/align=0-32 | 21785.49 | 46013.10 | 2.11x | +| BenchmarkCRC32/poly=IEEE/size=32kB/align=1-32 | 21946.47 | 45954.10 | 2.09x | + +cpu: AMD Ryzen 9 9950X 16-Core Processor + +# license + +Standard Go license. See [LICENSE](LICENSE) for details. diff --git a/vendor/github.com/klauspost/crc32/crc32.go b/vendor/github.com/klauspost/crc32/crc32.go new file mode 100644 index 00000000000..1de0bb3a478 --- /dev/null +++ b/vendor/github.com/klauspost/crc32/crc32.go @@ -0,0 +1,253 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package crc32 implements the 32-bit cyclic redundancy check, or CRC-32, +// checksum. See https://en.wikipedia.org/wiki/Cyclic_redundancy_check for +// information. +// +// Polynomials are represented in LSB-first form also known as reversed representation. 
+// +// See https://en.wikipedia.org/wiki/Mathematics_of_cyclic_redundancy_checks#Reversed_representations_and_reciprocal_polynomials +// for information. +package crc32 + +import ( + "encoding/binary" + "errors" + "hash" + "sync" + "sync/atomic" +) + +// The size of a CRC-32 checksum in bytes. +const Size = 4 + +// Predefined polynomials. +const ( + // IEEE is by far and away the most common CRC-32 polynomial. + // Used by ethernet (IEEE 802.3), v.42, fddi, gzip, zip, png, ... + IEEE = 0xedb88320 + + // Castagnoli's polynomial, used in iSCSI. + // Has better error detection characteristics than IEEE. + // https://dx.doi.org/10.1109/26.231911 + Castagnoli = 0x82f63b78 + + // Koopman's polynomial. + // Also has better error detection characteristics than IEEE. + // https://dx.doi.org/10.1109/DSN.2002.1028931 + Koopman = 0xeb31d82e +) + +// Table is a 256-word table representing the polynomial for efficient processing. +type Table [256]uint32 + +// This file makes use of functions implemented in architecture-specific files. +// The interface that they implement is as follows: +// +// // archAvailableIEEE reports whether an architecture-specific CRC32-IEEE +// // algorithm is available. +// archAvailableIEEE() bool +// +// // archInitIEEE initializes the architecture-specific CRC3-IEEE algorithm. +// // It can only be called if archAvailableIEEE() returns true. +// archInitIEEE() +// +// // archUpdateIEEE updates the given CRC32-IEEE. It can only be called if +// // archInitIEEE() was previously called. +// archUpdateIEEE(crc uint32, p []byte) uint32 +// +// // archAvailableCastagnoli reports whether an architecture-specific +// // CRC32-C algorithm is available. +// archAvailableCastagnoli() bool +// +// // archInitCastagnoli initializes the architecture-specific CRC32-C +// // algorithm. It can only be called if archAvailableCastagnoli() returns +// // true. +// archInitCastagnoli() +// +// // archUpdateCastagnoli updates the given CRC32-C. 
It can only be called +// // if archInitCastagnoli() was previously called. +// archUpdateCastagnoli(crc uint32, p []byte) uint32 + +// castagnoliTable points to a lazily initialized Table for the Castagnoli +// polynomial. MakeTable will always return this value when asked to make a +// Castagnoli table so we can compare against it to find when the caller is +// using this polynomial. +var castagnoliTable *Table +var castagnoliTable8 *slicing8Table +var updateCastagnoli func(crc uint32, p []byte) uint32 +var haveCastagnoli atomic.Bool + +var castagnoliInitOnce = sync.OnceFunc(func() { + castagnoliTable = simpleMakeTable(Castagnoli) + + if archAvailableCastagnoli() { + archInitCastagnoli() + updateCastagnoli = archUpdateCastagnoli + } else { + // Initialize the slicing-by-8 table. + castagnoliTable8 = slicingMakeTable(Castagnoli) + updateCastagnoli = func(crc uint32, p []byte) uint32 { + return slicingUpdate(crc, castagnoliTable8, p) + } + } + + haveCastagnoli.Store(true) +}) + +// IEEETable is the table for the [IEEE] polynomial. +var IEEETable = simpleMakeTable(IEEE) + +// ieeeTable8 is the slicing8Table for IEEE +var ieeeTable8 *slicing8Table +var updateIEEE func(crc uint32, p []byte) uint32 + +var ieeeInitOnce = sync.OnceFunc(func() { + if archAvailableIEEE() { + archInitIEEE() + updateIEEE = archUpdateIEEE + } else { + // Initialize the slicing-by-8 table. + ieeeTable8 = slicingMakeTable(IEEE) + updateIEEE = func(crc uint32, p []byte) uint32 { + return slicingUpdate(crc, ieeeTable8, p) + } + } +}) + +// MakeTable returns a [Table] constructed from the specified polynomial. +// The contents of this [Table] must not be modified. +func MakeTable(poly uint32) *Table { + switch poly { + case IEEE: + ieeeInitOnce() + return IEEETable + case Castagnoli: + castagnoliInitOnce() + return castagnoliTable + default: + return simpleMakeTable(poly) + } +} + +// digest represents the partial evaluation of a checksum. 
+type digest struct { + crc uint32 + tab *Table +} + +// New creates a new [hash.Hash32] computing the CRC-32 checksum using the +// polynomial represented by the [Table]. Its Sum method will lay the +// value out in big-endian byte order. The returned Hash32 also +// implements [encoding.BinaryMarshaler] and [encoding.BinaryUnmarshaler] to +// marshal and unmarshal the internal state of the hash. +func New(tab *Table) hash.Hash32 { + if tab == IEEETable { + ieeeInitOnce() + } + return &digest{0, tab} +} + +// NewIEEE creates a new [hash.Hash32] computing the CRC-32 checksum using +// the [IEEE] polynomial. Its Sum method will lay the value out in +// big-endian byte order. The returned Hash32 also implements +// [encoding.BinaryMarshaler] and [encoding.BinaryUnmarshaler] to marshal +// and unmarshal the internal state of the hash. +func NewIEEE() hash.Hash32 { return New(IEEETable) } + +func (d *digest) Size() int { return Size } + +func (d *digest) BlockSize() int { return 1 } + +func (d *digest) Reset() { d.crc = 0 } + +const ( + magic = "crc\x01" + marshaledSize = len(magic) + 4 + 4 +) + +func (d *digest) AppendBinary(b []byte) ([]byte, error) { + b = append(b, magic...) 
+ b = binary.BigEndian.AppendUint32(b, tableSum(d.tab)) + b = binary.BigEndian.AppendUint32(b, d.crc) + return b, nil +} + +func (d *digest) MarshalBinary() ([]byte, error) { + return d.AppendBinary(make([]byte, 0, marshaledSize)) + +} + +func (d *digest) UnmarshalBinary(b []byte) error { + if len(b) < len(magic) || string(b[:len(magic)]) != magic { + return errors.New("hash/crc32: invalid hash state identifier") + } + if len(b) != marshaledSize { + return errors.New("hash/crc32: invalid hash state size") + } + if tableSum(d.tab) != binary.BigEndian.Uint32(b[4:]) { + return errors.New("hash/crc32: tables do not match") + } + d.crc = binary.BigEndian.Uint32(b[8:]) + return nil +} + +func update(crc uint32, tab *Table, p []byte, checkInitIEEE bool) uint32 { + switch { + case haveCastagnoli.Load() && tab == castagnoliTable: + return updateCastagnoli(crc, p) + case tab == IEEETable: + if checkInitIEEE { + ieeeInitOnce() + } + return updateIEEE(crc, p) + default: + return simpleUpdate(crc, tab, p) + } +} + +// Update returns the result of adding the bytes in p to the crc. +func Update(crc uint32, tab *Table, p []byte) uint32 { + // Unfortunately, because IEEETable is exported, IEEE may be used without a + // call to MakeTable. We have to make sure it gets initialized in that case. + return update(crc, tab, p, true) +} + +func (d *digest) Write(p []byte) (n int, err error) { + // We only create digest objects through New() which takes care of + // initialization in this case. + d.crc = update(d.crc, d.tab, p, false) + return len(p), nil +} + +func (d *digest) Sum32() uint32 { return d.crc } + +func (d *digest) Sum(in []byte) []byte { + s := d.Sum32() + return append(in, byte(s>>24), byte(s>>16), byte(s>>8), byte(s)) +} + +// Checksum returns the CRC-32 checksum of data +// using the polynomial represented by the [Table]. 
+func Checksum(data []byte, tab *Table) uint32 { return Update(0, tab, data) } + +// ChecksumIEEE returns the CRC-32 checksum of data +// using the [IEEE] polynomial. +func ChecksumIEEE(data []byte) uint32 { + ieeeInitOnce() + return updateIEEE(0, data) +} + +// tableSum returns the IEEE checksum of table t. +func tableSum(t *Table) uint32 { + var a [1024]byte + b := a[:0] + if t != nil { + for _, x := range t { + b = binary.BigEndian.AppendUint32(b, x) + } + } + return ChecksumIEEE(b) +} diff --git a/vendor/github.com/klauspost/crc32/crc32_amd64.go b/vendor/github.com/klauspost/crc32/crc32_amd64.go new file mode 100644 index 00000000000..c6d30b25c92 --- /dev/null +++ b/vendor/github.com/klauspost/crc32/crc32_amd64.go @@ -0,0 +1,253 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// AMD64-specific hardware-assisted CRC32 algorithms. See crc32.go for a +// description of the interface that each architecture-specific file +// implements. + +package crc32 + +import ( + "unsafe" + + "golang.org/x/sys/cpu" +) + +// This file contains the code to call the SSE 4.2 version of the Castagnoli +// and IEEE CRC. + +// castagnoliSSE42 is defined in crc32_amd64.s and uses the SSE 4.2 CRC32 +// instruction. +// +//go:noescape +func castagnoliSSE42(crc uint32, p []byte) uint32 + +// castagnoliSSE42Triple is defined in crc32_amd64.s and uses the SSE 4.2 CRC32 +// instruction. +// +//go:noescape +func castagnoliSSE42Triple( + crcA, crcB, crcC uint32, + a, b, c []byte, + rounds uint32, +) (retA uint32, retB uint32, retC uint32) + +// ieeeCLMUL is defined in crc_amd64.s and uses the PCLMULQDQ +// instruction as well as SSE 4.1. +// +//go:noescape +func ieeeCLMUL(crc uint32, p []byte) uint32 + +// castagnoliCLMULAvx512 is defined in crc_amd64.s and uses the PCLMULQDQ +// instruction as well as SSE 4.1. 
+// +//go:noescape +func castagnoliCLMULAvx512(crc uint32, p []byte) uint32 + +// ieeeCLMUL is defined in crc_amd64.s and uses the PCLMULQDQ +// instruction as well as SSE 4.1. +// +//go:noescape +func ieeeCLMULAvx512(crc uint32, p []byte) uint32 + +const castagnoliK1 = 168 +const castagnoliK2 = 1344 + +type sse42Table [4]Table + +var castagnoliSSE42TableK1 *sse42Table +var castagnoliSSE42TableK2 *sse42Table + +func archAvailableCastagnoli() bool { + return cpu.X86.HasSSE42 +} + +func archInitCastagnoli() { + if !cpu.X86.HasSSE42 { + panic("arch-specific Castagnoli not available") + } + castagnoliSSE42TableK1 = new(sse42Table) + castagnoliSSE42TableK2 = new(sse42Table) + // See description in updateCastagnoli. + // t[0][i] = CRC(i000, O) + // t[1][i] = CRC(0i00, O) + // t[2][i] = CRC(00i0, O) + // t[3][i] = CRC(000i, O) + // where O is a sequence of K zeros. + var tmp [castagnoliK2]byte + for b := 0; b < 4; b++ { + for i := 0; i < 256; i++ { + val := uint32(i) << uint32(b*8) + castagnoliSSE42TableK1[b][i] = castagnoliSSE42(val, tmp[:castagnoliK1]) + castagnoliSSE42TableK2[b][i] = castagnoliSSE42(val, tmp[:]) + } + } +} + +// castagnoliShift computes the CRC32-C of K1 or K2 zeroes (depending on the +// table given) with the given initial crc value. This corresponds to +// CRC(crc, O) in the description in updateCastagnoli. +func castagnoliShift(table *sse42Table, crc uint32) uint32 { + return table[3][crc>>24] ^ + table[2][(crc>>16)&0xFF] ^ + table[1][(crc>>8)&0xFF] ^ + table[0][crc&0xFF] +} + +func archUpdateCastagnoli(crc uint32, p []byte) uint32 { + if !cpu.X86.HasSSE42 { + panic("not available") + } + + // This method is inspired from the algorithm in Intel's white paper: + // "Fast CRC Computation for iSCSI Polynomial Using CRC32 Instruction" + // The same strategy of splitting the buffer in three is used but the + // combining calculation is different; the complete derivation is explained + // below. 
+ // + // -- The basic idea -- + // + // The CRC32 instruction (available in SSE4.2) can process 8 bytes at a + // time. In recent Intel architectures the instruction takes 3 cycles; + // however the processor can pipeline up to three instructions if they + // don't depend on each other. + // + // Roughly this means that we can process three buffers in about the same + // time we can process one buffer. + // + // The idea is then to split the buffer in three, CRC the three pieces + // separately and then combine the results. + // + // Combining the results requires precomputed tables, so we must choose a + // fixed buffer length to optimize. The longer the length, the faster; but + // only buffers longer than this length will use the optimization. We choose + // two cutoffs and compute tables for both: + // - one around 512: 168*3=504 + // - one around 4KB: 1344*3=4032 + // + // -- The nitty gritty -- + // + // Let CRC(I, X) be the non-inverted CRC32-C of the sequence X (with + // initial non-inverted CRC I). This function has the following properties: + // (a) CRC(I, AB) = CRC(CRC(I, A), B) + // (b) CRC(I, A xor B) = CRC(I, A) xor CRC(0, B) + // + // Say we want to compute CRC(I, ABC) where A, B, C are three sequences of + // K bytes each, where K is a fixed constant. Let O be the sequence of K zero + // bytes. + // + // CRC(I, ABC) = CRC(I, ABO xor C) + // = CRC(I, ABO) xor CRC(0, C) + // = CRC(CRC(I, AB), O) xor CRC(0, C) + // = CRC(CRC(I, AO xor B), O) xor CRC(0, C) + // = CRC(CRC(I, AO) xor CRC(0, B), O) xor CRC(0, C) + // = CRC(CRC(CRC(I, A), O) xor CRC(0, B), O) xor CRC(0, C) + // + // The castagnoliSSE42Triple function can compute CRC(I, A), CRC(0, B), + // and CRC(0, C) efficiently. We just need to find a way to quickly compute + // CRC(uvwx, O) given a 4-byte initial value uvwx. 
We can precompute these + // values; since we can't have a 32-bit table, we break it up into four + // 8-bit tables: + // + // CRC(uvwx, O) = CRC(u000, O) xor + // CRC(0v00, O) xor + // CRC(00w0, O) xor + // CRC(000x, O) + // + // We can compute tables corresponding to the four terms for all 8-bit + // values. + + crc = ^crc + + // Disabled, since it is not significantly faster than the SSE 4.2 version, even on Zen 5. + if false && len(p) >= 2048 && cpu.X86.HasAVX512F && cpu.X86.HasAVX512VL && cpu.X86.HasAVX512VPCLMULQDQ && cpu.X86.HasPCLMULQDQ { + left := len(p) & 15 + do := len(p) - left + crc = castagnoliCLMULAvx512(crc, p[:do]) + return ^castagnoliSSE42(crc, p[do:]) + } + + // If a buffer is long enough to use the optimization, process the first few + // bytes to align the buffer to an 8 byte boundary (if necessary). + if len(p) >= castagnoliK1*3 { + delta := int(uintptr(unsafe.Pointer(&p[0])) & 7) + if delta != 0 { + delta = 8 - delta + crc = castagnoliSSE42(crc, p[:delta]) + p = p[delta:] + } + } + + // Process 3*K2 at a time. + for len(p) >= castagnoliK2*3 { + // Compute CRC(I, A), CRC(0, B), and CRC(0, C). + crcA, crcB, crcC := castagnoliSSE42Triple( + crc, 0, 0, + p, p[castagnoliK2:], p[castagnoliK2*2:], + castagnoliK2/24) + + // CRC(I, AB) = CRC(CRC(I, A), O) xor CRC(0, B) + crcAB := castagnoliShift(castagnoliSSE42TableK2, crcA) ^ crcB + // CRC(I, ABC) = CRC(CRC(I, AB), O) xor CRC(0, C) + crc = castagnoliShift(castagnoliSSE42TableK2, crcAB) ^ crcC + p = p[castagnoliK2*3:] + } + + // Process 3*K1 at a time. + for len(p) >= castagnoliK1*3 { + // Compute CRC(I, A), CRC(0, B), and CRC(0, C). 
+ crcA, crcB, crcC := castagnoliSSE42Triple( + crc, 0, 0, + p, p[castagnoliK1:], p[castagnoliK1*2:], + castagnoliK1/24) + + // CRC(I, AB) = CRC(CRC(I, A), O) xor CRC(0, B) + crcAB := castagnoliShift(castagnoliSSE42TableK1, crcA) ^ crcB + // CRC(I, ABC) = CRC(CRC(I, AB), O) xor CRC(0, C) + crc = castagnoliShift(castagnoliSSE42TableK1, crcAB) ^ crcC + p = p[castagnoliK1*3:] + } + + // Use the simple implementation for what's left. + crc = castagnoliSSE42(crc, p) + return ^crc +} + +func archAvailableIEEE() bool { + return cpu.X86.HasPCLMULQDQ && cpu.X86.HasSSE41 +} + +var archIeeeTable8 *slicing8Table + +func archInitIEEE() { + if !cpu.X86.HasPCLMULQDQ || !cpu.X86.HasSSE41 { + panic("not available") + } + // We still use slicing-by-8 for small buffers. + archIeeeTable8 = slicingMakeTable(IEEE) +} + +func archUpdateIEEE(crc uint32, p []byte) uint32 { + if !cpu.X86.HasPCLMULQDQ || !cpu.X86.HasSSE41 { + panic("not available") + } + + if len(p) >= 64 { + if len(p) >= 1024 && cpu.X86.HasAVX512F && cpu.X86.HasAVX512VL && cpu.X86.HasAVX512VPCLMULQDQ && cpu.X86.HasPCLMULQDQ { + left := len(p) & 15 + do := len(p) - left + crc = ^ieeeCLMULAvx512(^crc, p[:do]) + p = p[do:] + } else { + left := len(p) & 15 + do := len(p) - left + crc = ^ieeeCLMUL(^crc, p[:do]) + p = p[do:] + } + } + if len(p) == 0 { + return crc + } + return slicingUpdate(crc, archIeeeTable8, p) +} diff --git a/vendor/github.com/klauspost/crc32/crc32_amd64.s b/vendor/github.com/klauspost/crc32/crc32_amd64.s new file mode 100644 index 00000000000..e2de3a5cb68 --- /dev/null +++ b/vendor/github.com/klauspost/crc32/crc32_amd64.s @@ -0,0 +1,527 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +// castagnoliSSE42 updates the (non-inverted) crc with the given buffer. 
+// +// func castagnoliSSE42(crc uint32, p []byte) uint32 +TEXT ·castagnoliSSE42(SB), NOSPLIT, $0 + MOVL crc+0(FP), AX // CRC value + MOVQ p+8(FP), SI // data pointer + MOVQ p_len+16(FP), CX // len(p) + + // If there are fewer than 8 bytes to process, skip alignment. + CMPQ CX, $8 + JL less_than_8 + + MOVQ SI, BX + ANDQ $7, BX + JZ aligned + + // Process the first few bytes to 8-byte align the input. + + // BX = 8 - BX. We need to process this many bytes to align. + SUBQ $1, BX + XORQ $7, BX + + BTQ $0, BX + JNC align_2 + + CRC32B (SI), AX + DECQ CX + INCQ SI + +align_2: + BTQ $1, BX + JNC align_4 + + CRC32W (SI), AX + + SUBQ $2, CX + ADDQ $2, SI + +align_4: + BTQ $2, BX + JNC aligned + + CRC32L (SI), AX + + SUBQ $4, CX + ADDQ $4, SI + +aligned: + // The input is now 8-byte aligned and we can process 8-byte chunks. + CMPQ CX, $8 + JL less_than_8 + + CRC32Q (SI), AX + ADDQ $8, SI + SUBQ $8, CX + JMP aligned + +less_than_8: + // We may have some bytes left over; process 4 bytes, then 2, then 1. + BTQ $2, CX + JNC less_than_4 + + CRC32L (SI), AX + ADDQ $4, SI + +less_than_4: + BTQ $1, CX + JNC less_than_2 + + CRC32W (SI), AX + ADDQ $2, SI + +less_than_2: + BTQ $0, CX + JNC done + + CRC32B (SI), AX + +done: + MOVL AX, ret+32(FP) + RET + +// castagnoliSSE42Triple updates three (non-inverted) crcs with (24*rounds) +// bytes from each buffer. 
+// +// func castagnoliSSE42Triple( +// crc1, crc2, crc3 uint32, +// a, b, c []byte, +// rounds uint32, +// ) (retA uint32, retB uint32, retC uint32) +TEXT ·castagnoliSSE42Triple(SB), NOSPLIT, $0 + MOVL crcA+0(FP), AX + MOVL crcB+4(FP), CX + MOVL crcC+8(FP), DX + + MOVQ a+16(FP), R8 // data pointer + MOVQ b+40(FP), R9 // data pointer + MOVQ c+64(FP), R10 // data pointer + + MOVL rounds+88(FP), R11 + +loop: + CRC32Q (R8), AX + CRC32Q (R9), CX + CRC32Q (R10), DX + + CRC32Q 8(R8), AX + CRC32Q 8(R9), CX + CRC32Q 8(R10), DX + + CRC32Q 16(R8), AX + CRC32Q 16(R9), CX + CRC32Q 16(R10), DX + + ADDQ $24, R8 + ADDQ $24, R9 + ADDQ $24, R10 + + DECQ R11 + JNZ loop + + MOVL AX, retA+96(FP) + MOVL CX, retB+100(FP) + MOVL DX, retC+104(FP) + RET + +// CRC32 polynomial data +// +// These constants are lifted from the +// Linux kernel, since they avoid the costly +// PSHUFB 16 byte reversal proposed in the +// original Intel paper. +DATA r2r1<>+0(SB)/8, $0x154442bd4 +DATA r2r1<>+8(SB)/8, $0x1c6e41596 +DATA r4r3<>+0(SB)/8, $0x1751997d0 +DATA r4r3<>+8(SB)/8, $0x0ccaa009e +DATA rupoly<>+0(SB)/8, $0x1db710641 +DATA rupoly<>+8(SB)/8, $0x1f7011641 +DATA r5<>+0(SB)/8, $0x163cd6124 + +GLOBL r2r1<>(SB), RODATA, $16 +GLOBL r4r3<>(SB), RODATA, $16 +GLOBL rupoly<>(SB), RODATA, $16 +GLOBL r5<>(SB), RODATA, $8 + +// Based on https://www.intel.com/content/dam/www/public/us/en/documents/white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf +// len(p) must be at least 64, and must be a multiple of 16. 
+ +// func ieeeCLMUL(crc uint32, p []byte) uint32 +TEXT ·ieeeCLMUL(SB), NOSPLIT, $0 + MOVL crc+0(FP), X0 // Initial CRC value + MOVQ p+8(FP), SI // data pointer + MOVQ p_len+16(FP), CX // len(p) + + MOVOU (SI), X1 + MOVOU 16(SI), X2 + MOVOU 32(SI), X3 + MOVOU 48(SI), X4 + PXOR X0, X1 + ADDQ $64, SI // buf+=64 + SUBQ $64, CX // len-=64 + CMPQ CX, $64 // Less than 64 bytes left + JB remain64 + + MOVOA r2r1<>+0(SB), X0 + +loopback64: + MOVOA X1, X5 + MOVOA X2, X6 + MOVOA X3, X7 + MOVOA X4, X8 + + PCLMULQDQ $0, X0, X1 + PCLMULQDQ $0, X0, X2 + PCLMULQDQ $0, X0, X3 + PCLMULQDQ $0, X0, X4 + + // Load next early + MOVOU (SI), X11 + MOVOU 16(SI), X12 + MOVOU 32(SI), X13 + MOVOU 48(SI), X14 + + PCLMULQDQ $0x11, X0, X5 + PCLMULQDQ $0x11, X0, X6 + PCLMULQDQ $0x11, X0, X7 + PCLMULQDQ $0x11, X0, X8 + + PXOR X5, X1 + PXOR X6, X2 + PXOR X7, X3 + PXOR X8, X4 + + PXOR X11, X1 + PXOR X12, X2 + PXOR X13, X3 + PXOR X14, X4 + + ADDQ $0x40, DI + ADDQ $64, SI // buf+=64 + SUBQ $64, CX // len-=64 + CMPQ CX, $64 // Less than 64 bytes left? + JGE loopback64 + + // Fold result into a single register (X1) +remain64: + MOVOA r4r3<>+0(SB), X0 + + MOVOA X1, X5 + PCLMULQDQ $0, X0, X1 + PCLMULQDQ $0x11, X0, X5 + PXOR X5, X1 + PXOR X2, X1 + + MOVOA X1, X5 + PCLMULQDQ $0, X0, X1 + PCLMULQDQ $0x11, X0, X5 + PXOR X5, X1 + PXOR X3, X1 + + MOVOA X1, X5 + PCLMULQDQ $0, X0, X1 + PCLMULQDQ $0x11, X0, X5 + PXOR X5, X1 + PXOR X4, X1 + + // If there is less than 16 bytes left we are done + CMPQ CX, $16 + JB finish + + // Encode 16 bytes +remain16: + MOVOU (SI), X10 + MOVOA X1, X5 + PCLMULQDQ $0, X0, X1 + PCLMULQDQ $0x11, X0, X5 + PXOR X5, X1 + PXOR X10, X1 + SUBQ $16, CX + ADDQ $16, SI + CMPQ CX, $16 + JGE remain16 + +finish: + // Fold final result into 32 bits and return it + PCMPEQB X3, X3 + PCLMULQDQ $1, X1, X0 + PSRLDQ $8, X1 + PXOR X0, X1 + + MOVOA X1, X2 + MOVQ r5<>+0(SB), X0 + + // Creates 32 bit mask. Note that we don't care about upper half. 
+ PSRLQ $32, X3 + + PSRLDQ $4, X2 + PAND X3, X1 + PCLMULQDQ $0, X0, X1 + PXOR X2, X1 + + MOVOA rupoly<>+0(SB), X0 + + MOVOA X1, X2 + PAND X3, X1 + PCLMULQDQ $0x10, X0, X1 + PAND X3, X1 + PCLMULQDQ $0, X0, X1 + PXOR X2, X1 + + PEXTRD $1, X1, AX + MOVL AX, ret+32(FP) + + RET + +DATA r2r1X<>+0(SB)/8, $0x154442bd4 +DATA r2r1X<>+8(SB)/8, $0x1c6e41596 +DATA r2r1X<>+16(SB)/8, $0x154442bd4 +DATA r2r1X<>+24(SB)/8, $0x1c6e41596 +DATA r2r1X<>+32(SB)/8, $0x154442bd4 +DATA r2r1X<>+40(SB)/8, $0x1c6e41596 +DATA r2r1X<>+48(SB)/8, $0x154442bd4 +DATA r2r1X<>+56(SB)/8, $0x1c6e41596 +GLOBL r2r1X<>(SB), RODATA, $64 + +// Based on https://www.intel.com/content/dam/www/public/us/en/documents/white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf +// len(p) must be at least 128, and must be a multiple of 16. + +// func ieeeCLMULAvx512(crc uint32, p []byte) uint32 +TEXT ·ieeeCLMULAvx512(SB), NOSPLIT, $0 + MOVL crc+0(FP), AX // Initial CRC value + MOVQ p+8(FP), SI // data pointer + MOVQ p_len+16(FP), CX // len(p) + + VPXORQ Z0, Z0, Z0 + VMOVDQU64 (SI), Z1 + VMOVQ AX, X0 + VPXORQ Z0, Z1, Z1 // Merge initial CRC value into Z1 + ADDQ $64, SI // buf+=64 + SUBQ $64, CX // len-=64 + + VMOVDQU64 r2r1X<>+0(SB), Z0 + +loopback64: + // Load next early + VMOVDQU64 (SI), Z11 + + VPCLMULQDQ $0x11, Z0, Z1, Z5 + VPCLMULQDQ $0, Z0, Z1, Z1 + + VPTERNLOGD $0x96, Z11, Z5, Z1 // Combine results with xor into Z1 + + ADDQ $0x40, DI + ADDQ $64, SI // buf+=64 + SUBQ $64, CX // len-=64 + CMPQ CX, $64 // Less than 64 bytes left? 
+ JGE loopback64 + + // Fold result into a single register (X1) +remain64: + VEXTRACTF32X4 $1, Z1, X2 // X2: Second 128-bit lane + VEXTRACTF32X4 $2, Z1, X3 // X3: Third 128-bit lane + VEXTRACTF32X4 $3, Z1, X4 // X4: Fourth 128-bit lane + + MOVOA r4r3<>+0(SB), X0 + + MOVOA X1, X5 + PCLMULQDQ $0, X0, X1 + PCLMULQDQ $0x11, X0, X5 + PXOR X5, X1 + PXOR X2, X1 + + MOVOA X1, X5 + PCLMULQDQ $0, X0, X1 + PCLMULQDQ $0x11, X0, X5 + PXOR X5, X1 + PXOR X3, X1 + + MOVOA X1, X5 + PCLMULQDQ $0, X0, X1 + PCLMULQDQ $0x11, X0, X5 + PXOR X5, X1 + PXOR X4, X1 + + // If there is less than 16 bytes left we are done + CMPQ CX, $16 + JB finish + + // Encode 16 bytes +remain16: + MOVOU (SI), X10 + MOVOA X1, X5 + PCLMULQDQ $0, X0, X1 + PCLMULQDQ $0x11, X0, X5 + PXOR X5, X1 + PXOR X10, X1 + SUBQ $16, CX + ADDQ $16, SI + CMPQ CX, $16 + JGE remain16 + +finish: + // Fold final result into 32 bits and return it + PCMPEQB X3, X3 + PCLMULQDQ $1, X1, X0 + PSRLDQ $8, X1 + PXOR X0, X1 + + MOVOA X1, X2 + MOVQ r5<>+0(SB), X0 + + // Creates 32 bit mask. Note that we don't care about upper half. 
+ PSRLQ $32, X3 + + PSRLDQ $4, X2 + PAND X3, X1 + PCLMULQDQ $0, X0, X1 + PXOR X2, X1 + + MOVOA rupoly<>+0(SB), X0 + + MOVOA X1, X2 + PAND X3, X1 + PCLMULQDQ $0x10, X0, X1 + PAND X3, X1 + PCLMULQDQ $0, X0, X1 + PXOR X2, X1 + + PEXTRD $1, X1, AX + MOVL AX, ret+32(FP) + VZEROUPPER + RET + +// Castagonli Polynomial constants +DATA r2r1C<>+0(SB)/8, $0x0740eef02 +DATA r2r1C<>+8(SB)/8, $0x09e4addf8 +DATA r2r1C<>+16(SB)/8, $0x0740eef02 +DATA r2r1C<>+24(SB)/8, $0x09e4addf8 +DATA r2r1C<>+32(SB)/8, $0x0740eef02 +DATA r2r1C<>+40(SB)/8, $0x09e4addf8 +DATA r2r1C<>+48(SB)/8, $0x0740eef02 +DATA r2r1C<>+56(SB)/8, $0x09e4addf8 +GLOBL r2r1C<>(SB), RODATA, $64 + +DATA r4r3C<>+0(SB)/8, $0xf20c0dfe +DATA r4r3C<>+8(SB)/8, $0x14cd00bd6 +DATA rupolyC<>+0(SB)/8, $0x105ec76f0 +DATA rupolyC<>+8(SB)/8, $0xdea713f1 +DATA r5C<>+0(SB)/8, $0xdd45aab8 + +GLOBL r4r3C<>(SB), RODATA, $16 +GLOBL rupolyC<>(SB), RODATA, $16 +GLOBL r5C<>(SB), RODATA, $8 + +// Based on https://www.intel.com/content/dam/www/public/us/en/documents/white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf +// len(p) must be at least 128, and must be a multiple of 16. + +// func castagnoliCLMULAvx512(crc uint32, p []byte) uint32 +TEXT ·castagnoliCLMULAvx512(SB), NOSPLIT, $0 + MOVL crc+0(FP), AX // Initial CRC value + MOVQ p+8(FP), SI // data pointer + MOVQ p_len+16(FP), CX // len(p) + + VPXORQ Z0, Z0, Z0 + VMOVDQU64 (SI), Z1 + VMOVQ AX, X0 + VPXORQ Z0, Z1, Z1 // Merge initial CRC value into Z1 + ADDQ $64, SI // buf+=64 + SUBQ $64, CX // len-=64 + + VMOVDQU64 r2r1C<>+0(SB), Z0 + +loopback64: + // Load next early + VMOVDQU64 (SI), Z11 + + VPCLMULQDQ $0x11, Z0, Z1, Z5 + VPCLMULQDQ $0, Z0, Z1, Z1 + + VPTERNLOGD $0x96, Z11, Z5, Z1 // Combine results with xor into Z1 + + ADDQ $0x40, DI + ADDQ $64, SI // buf+=64 + SUBQ $64, CX // len-=64 + CMPQ CX, $64 // Less than 64 bytes left? 
+ JGE loopback64 + + // Fold result into a single register (X1) +remain64: + VEXTRACTF32X4 $1, Z1, X2 // X2: Second 128-bit lane + VEXTRACTF32X4 $2, Z1, X3 // X3: Third 128-bit lane + VEXTRACTF32X4 $3, Z1, X4 // X4: Fourth 128-bit lane + + MOVOA r4r3C<>+0(SB), X0 + + MOVOA X1, X5 + PCLMULQDQ $0, X0, X1 + PCLMULQDQ $0x11, X0, X5 + PXOR X5, X1 + PXOR X2, X1 + + MOVOA X1, X5 + PCLMULQDQ $0, X0, X1 + PCLMULQDQ $0x11, X0, X5 + PXOR X5, X1 + PXOR X3, X1 + + MOVOA X1, X5 + PCLMULQDQ $0, X0, X1 + PCLMULQDQ $0x11, X0, X5 + PXOR X5, X1 + PXOR X4, X1 + + // If there is less than 16 bytes left we are done + CMPQ CX, $16 + JB finish + + // Encode 16 bytes +remain16: + MOVOU (SI), X10 + MOVOA X1, X5 + PCLMULQDQ $0, X0, X1 + PCLMULQDQ $0x11, X0, X5 + PXOR X5, X1 + PXOR X10, X1 + SUBQ $16, CX + ADDQ $16, SI + CMPQ CX, $16 + JGE remain16 + +finish: + // Fold final result into 32 bits and return it + PCMPEQB X3, X3 + PCLMULQDQ $1, X1, X0 + PSRLDQ $8, X1 + PXOR X0, X1 + + MOVOA X1, X2 + MOVQ r5C<>+0(SB), X0 + + // Creates 32 bit mask. Note that we don't care about upper half. + PSRLQ $32, X3 + + PSRLDQ $4, X2 + PAND X3, X1 + PCLMULQDQ $0, X0, X1 + PXOR X2, X1 + + MOVOA rupolyC<>+0(SB), X0 + + MOVOA X1, X2 + PAND X3, X1 + PCLMULQDQ $0x10, X0, X1 + PAND X3, X1 + PCLMULQDQ $0, X0, X1 + PXOR X2, X1 + + PEXTRD $1, X1, AX + MOVL AX, ret+32(FP) + VZEROUPPER + RET diff --git a/vendor/github.com/klauspost/crc32/crc32_arm64.go b/vendor/github.com/klauspost/crc32/crc32_arm64.go new file mode 100644 index 00000000000..7e9ac553986 --- /dev/null +++ b/vendor/github.com/klauspost/crc32/crc32_arm64.go @@ -0,0 +1,50 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// ARM64-specific hardware-assisted CRC32 algorithms. See crc32.go for a +// description of the interface that each architecture-specific file +// implements. 
+ +package crc32 + +import "golang.org/x/sys/cpu" + +func castagnoliUpdate(crc uint32, p []byte) uint32 +func ieeeUpdate(crc uint32, p []byte) uint32 + +func archAvailableCastagnoli() bool { + return cpu.ARM64.HasCRC32 +} + +func archInitCastagnoli() { + if !cpu.ARM64.HasCRC32 { + panic("arch-specific crc32 instruction for Castagnoli not available") + } +} + +func archUpdateCastagnoli(crc uint32, p []byte) uint32 { + if !cpu.ARM64.HasCRC32 { + panic("arch-specific crc32 instruction for Castagnoli not available") + } + + return ^castagnoliUpdate(^crc, p) +} + +func archAvailableIEEE() bool { + return cpu.ARM64.HasCRC32 +} + +func archInitIEEE() { + if !cpu.ARM64.HasCRC32 { + panic("arch-specific crc32 instruction for IEEE not available") + } +} + +func archUpdateIEEE(crc uint32, p []byte) uint32 { + if !cpu.ARM64.HasCRC32 { + panic("arch-specific crc32 instruction for IEEE not available") + } + + return ^ieeeUpdate(^crc, p) +} diff --git a/vendor/github.com/klauspost/crc32/crc32_arm64.s b/vendor/github.com/klauspost/crc32/crc32_arm64.s new file mode 100644 index 00000000000..e82778f7bd6 --- /dev/null +++ b/vendor/github.com/klauspost/crc32/crc32_arm64.s @@ -0,0 +1,97 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +// castagnoliUpdate updates the non-inverted crc with the given data. 
+ +// func castagnoliUpdate(crc uint32, p []byte) uint32 +TEXT ·castagnoliUpdate(SB), NOSPLIT, $0-36 + MOVWU crc+0(FP), R9 // CRC value + MOVD p+8(FP), R13 // data pointer + MOVD p_len+16(FP), R11 // len(p) + +update: + CMP $16, R11 + BLT less_than_16 + LDP.P 16(R13), (R8, R10) + CRC32CX R8, R9 + CRC32CX R10, R9 + SUB $16, R11 + + JMP update + +less_than_16: + TBZ $3, R11, less_than_8 + + MOVD.P 8(R13), R10 + CRC32CX R10, R9 + +less_than_8: + TBZ $2, R11, less_than_4 + + MOVWU.P 4(R13), R10 + CRC32CW R10, R9 + +less_than_4: + TBZ $1, R11, less_than_2 + + MOVHU.P 2(R13), R10 + CRC32CH R10, R9 + +less_than_2: + TBZ $0, R11, done + + MOVBU (R13), R10 + CRC32CB R10, R9 + +done: + MOVWU R9, ret+32(FP) + RET + +// ieeeUpdate updates the non-inverted crc with the given data. + +// func ieeeUpdate(crc uint32, p []byte) uint32 +TEXT ·ieeeUpdate(SB), NOSPLIT, $0-36 + MOVWU crc+0(FP), R9 // CRC value + MOVD p+8(FP), R13 // data pointer + MOVD p_len+16(FP), R11 // len(p) + +update: + CMP $16, R11 + BLT less_than_16 + LDP.P 16(R13), (R8, R10) + CRC32X R8, R9 + CRC32X R10, R9 + SUB $16, R11 + + JMP update + +less_than_16: + TBZ $3, R11, less_than_8 + + MOVD.P 8(R13), R10 + CRC32X R10, R9 + +less_than_8: + TBZ $2, R11, less_than_4 + + MOVWU.P 4(R13), R10 + CRC32W R10, R9 + +less_than_4: + TBZ $1, R11, less_than_2 + + MOVHU.P 2(R13), R10 + CRC32H R10, R9 + +less_than_2: + TBZ $0, R11, done + + MOVBU (R13), R10 + CRC32B R10, R9 + +done: + MOVWU R9, ret+32(FP) + RET diff --git a/vendor/github.com/klauspost/crc32/crc32_generic.go b/vendor/github.com/klauspost/crc32/crc32_generic.go new file mode 100644 index 00000000000..d1cf69cf462 --- /dev/null +++ b/vendor/github.com/klauspost/crc32/crc32_generic.go @@ -0,0 +1,91 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// This file contains CRC32 algorithms that are not specific to any architecture +// and don't use hardware acceleration. +// +// The simple (and slow) CRC32 implementation only uses a 256*4 bytes table. +// +// The slicing-by-8 algorithm is a faster implementation that uses a bigger +// table (8*256*4 bytes). + +package crc32 + +import "encoding/binary" + +// simpleMakeTable allocates and constructs a Table for the specified +// polynomial. The table is suitable for use with the simple algorithm +// (simpleUpdate). +func simpleMakeTable(poly uint32) *Table { + t := new(Table) + simplePopulateTable(poly, t) + return t +} + +// simplePopulateTable constructs a Table for the specified polynomial, suitable +// for use with simpleUpdate. +func simplePopulateTable(poly uint32, t *Table) { + for i := 0; i < 256; i++ { + crc := uint32(i) + for j := 0; j < 8; j++ { + if crc&1 == 1 { + crc = (crc >> 1) ^ poly + } else { + crc >>= 1 + } + } + t[i] = crc + } +} + +// simpleUpdate uses the simple algorithm to update the CRC, given a table that +// was previously computed using simpleMakeTable. +func simpleUpdate(crc uint32, tab *Table, p []byte) uint32 { + crc = ^crc + for _, v := range p { + crc = tab[byte(crc)^v] ^ (crc >> 8) + } + return ^crc +} + +// Use slicing-by-8 when payload >= this value. +const slicing8Cutoff = 16 + +// slicing8Table is array of 8 Tables, used by the slicing-by-8 algorithm. +type slicing8Table [8]Table + +// slicingMakeTable constructs a slicing8Table for the specified polynomial. The +// table is suitable for use with the slicing-by-8 algorithm (slicingUpdate). 
+func slicingMakeTable(poly uint32) *slicing8Table { + t := new(slicing8Table) + simplePopulateTable(poly, &t[0]) + for i := 0; i < 256; i++ { + crc := t[0][i] + for j := 1; j < 8; j++ { + crc = t[0][crc&0xFF] ^ (crc >> 8) + t[j][i] = crc + } + } + return t +} + +// slicingUpdate uses the slicing-by-8 algorithm to update the CRC, given a +// table that was previously computed using slicingMakeTable. +func slicingUpdate(crc uint32, tab *slicing8Table, p []byte) uint32 { + if len(p) >= slicing8Cutoff { + crc = ^crc + for len(p) > 8 { + crc ^= binary.LittleEndian.Uint32(p) + crc = tab[0][p[7]] ^ tab[1][p[6]] ^ tab[2][p[5]] ^ tab[3][p[4]] ^ + tab[4][crc>>24] ^ tab[5][(crc>>16)&0xFF] ^ + tab[6][(crc>>8)&0xFF] ^ tab[7][crc&0xFF] + p = p[8:] + } + crc = ^crc + } + if len(p) == 0 { + return crc + } + return simpleUpdate(crc, &tab[0], p) +} diff --git a/vendor/github.com/klauspost/crc32/crc32_loong64.go b/vendor/github.com/klauspost/crc32/crc32_loong64.go new file mode 100644 index 00000000000..3e0fd9778d5 --- /dev/null +++ b/vendor/github.com/klauspost/crc32/crc32_loong64.go @@ -0,0 +1,50 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// LoongArch64-specific hardware-assisted CRC32 algorithms. See crc32.go for a +// description of the interface that each architecture-specific file +// implements. 
+ +package crc32 + +import "golang.org/x/sys/cpu" + +func castagnoliUpdate(crc uint32, p []byte) uint32 +func ieeeUpdate(crc uint32, p []byte) uint32 + +func archAvailableCastagnoli() bool { + return cpu.Loong64.HasCRC32 +} + +func archInitCastagnoli() { + if !cpu.Loong64.HasCRC32 { + panic("arch-specific crc32 instruction for Castagnoli not available") + } +} + +func archUpdateCastagnoli(crc uint32, p []byte) uint32 { + if !cpu.Loong64.HasCRC32 { + panic("arch-specific crc32 instruction for Castagnoli not available") + } + + return ^castagnoliUpdate(^crc, p) +} + +func archAvailableIEEE() bool { + return cpu.Loong64.HasCRC32 +} + +func archInitIEEE() { + if !cpu.Loong64.HasCRC32 { + panic("arch-specific crc32 instruction for IEEE not available") + } +} + +func archUpdateIEEE(crc uint32, p []byte) uint32 { + if !cpu.Loong64.HasCRC32 { + panic("arch-specific crc32 instruction for IEEE not available") + } + + return ^ieeeUpdate(^crc, p) +} diff --git a/vendor/github.com/klauspost/crc32/crc32_loong64.s b/vendor/github.com/klauspost/crc32/crc32_loong64.s new file mode 100644 index 00000000000..7165714dcad --- /dev/null +++ b/vendor/github.com/klauspost/crc32/crc32_loong64.s @@ -0,0 +1,160 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +// castagnoliUpdate updates the non-inverted crc with the given data. + +// func castagnoliUpdate(crc uint32, p []byte) uint32 +TEXT ·castagnoliUpdate(SB), NOSPLIT, $0-36 + MOVWU crc+0(FP), R4 // a0 = CRC value + MOVV p+8(FP), R5 // a1 = data pointer + MOVV p_len+16(FP), R6 // a2 = len(p) + + SGT $8, R6, R12 + BNE R12, less_than_8 + AND $7, R5, R12 + BEQ R12, aligned + + // Process the first few bytes to 8-byte align the input. + // t0 = 8 - t0. We need to process this many bytes to align. 
+ SUB $1, R12 + XOR $7, R12 + + AND $1, R12, R13 + BEQ R13, align_2 + MOVB (R5), R13 + CRCCWBW R4, R13, R4 + ADDV $1, R5 + ADDV $-1, R6 + +align_2: + AND $2, R12, R13 + BEQ R13, align_4 + MOVH (R5), R13 + CRCCWHW R4, R13, R4 + ADDV $2, R5 + ADDV $-2, R6 + +align_4: + AND $4, R12, R13 + BEQ R13, aligned + MOVW (R5), R13 + CRCCWWW R4, R13, R4 + ADDV $4, R5 + ADDV $-4, R6 + +aligned: + // The input is now 8-byte aligned and we can process 8-byte chunks. + SGT $8, R6, R12 + BNE R12, less_than_8 + MOVV (R5), R13 + CRCCWVW R4, R13, R4 + ADDV $8, R5 + ADDV $-8, R6 + JMP aligned + +less_than_8: + // We may have some bytes left over; process 4 bytes, then 2, then 1. + AND $4, R6, R12 + BEQ R12, less_than_4 + MOVW (R5), R13 + CRCCWWW R4, R13, R4 + ADDV $4, R5 + ADDV $-4, R6 + +less_than_4: + AND $2, R6, R12 + BEQ R12, less_than_2 + MOVH (R5), R13 + CRCCWHW R4, R13, R4 + ADDV $2, R5 + ADDV $-2, R6 + +less_than_2: + BEQ R6, done + MOVB (R5), R13 + CRCCWBW R4, R13, R4 + +done: + MOVW R4, ret+32(FP) + RET + +// ieeeUpdate updates the non-inverted crc with the given data. + +// func ieeeUpdate(crc uint32, p []byte) uint32 +TEXT ·ieeeUpdate(SB), NOSPLIT, $0-36 + MOVWU crc+0(FP), R4 // a0 = CRC value + MOVV p+8(FP), R5 // a1 = data pointer + MOVV p_len+16(FP), R6 // a2 = len(p) + + SGT $8, R6, R12 + BNE R12, less_than_8 + AND $7, R5, R12 + BEQ R12, aligned + + // Process the first few bytes to 8-byte align the input. + // t0 = 8 - t0. We need to process this many bytes to align. + SUB $1, R12 + XOR $7, R12 + + AND $1, R12, R13 + BEQ R13, align_2 + MOVB (R5), R13 + CRCWBW R4, R13, R4 + ADDV $1, R5 + ADDV $-1, R6 + +align_2: + AND $2, R12, R13 + BEQ R13, align_4 + MOVH (R5), R13 + CRCWHW R4, R13, R4 + ADDV $2, R5 + ADDV $-2, R6 + +align_4: + AND $4, R12, R13 + BEQ R13, aligned + MOVW (R5), R13 + CRCWWW R4, R13, R4 + ADDV $4, R5 + ADDV $-4, R6 + +aligned: + // The input is now 8-byte aligned and we can process 8-byte chunks. 
+ SGT $8, R6, R12 + BNE R12, less_than_8 + MOVV (R5), R13 + CRCWVW R4, R13, R4 + ADDV $8, R5 + ADDV $-8, R6 + JMP aligned + +less_than_8: + // We may have some bytes left over; process 4 bytes, then 2, then 1. + AND $4, R6, R12 + BEQ R12, less_than_4 + MOVW (R5), R13 + CRCWWW R4, R13, R4 + ADDV $4, R5 + ADDV $-4, R6 + +less_than_4: + AND $2, R6, R12 + BEQ R12, less_than_2 + MOVH (R5), R13 + CRCWHW R4, R13, R4 + ADDV $2, R5 + ADDV $-2, R6 + +less_than_2: + BEQ R6, done + MOVB (R5), R13 + CRCWBW R4, R13, R4 + +done: + MOVW R4, ret+32(FP) + RET + diff --git a/vendor/github.com/klauspost/crc32/crc32_otherarch.go b/vendor/github.com/klauspost/crc32/crc32_otherarch.go new file mode 100644 index 00000000000..f900968ad3d --- /dev/null +++ b/vendor/github.com/klauspost/crc32/crc32_otherarch.go @@ -0,0 +1,15 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !amd64 && !s390x && !ppc64le && !arm64 && !loong64 + +package crc32 + +func archAvailableIEEE() bool { return false } +func archInitIEEE() { panic("not available") } +func archUpdateIEEE(crc uint32, p []byte) uint32 { panic("not available") } + +func archAvailableCastagnoli() bool { return false } +func archInitCastagnoli() { panic("not available") } +func archUpdateCastagnoli(crc uint32, p []byte) uint32 { panic("not available") } diff --git a/vendor/github.com/klauspost/crc32/crc32_ppc64le.go b/vendor/github.com/klauspost/crc32/crc32_ppc64le.go new file mode 100644 index 00000000000..c22e38e0094 --- /dev/null +++ b/vendor/github.com/klauspost/crc32/crc32_ppc64le.go @@ -0,0 +1,88 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package crc32 + +import ( + "unsafe" +) + +const ( + vecMinLen = 16 + vecAlignMask = 15 // align to 16 bytes + crcIEEE = 1 + crcCast = 2 +) + +//go:noescape +func ppc64SlicingUpdateBy8(crc uint32, table8 *slicing8Table, p []byte) uint32 + +// this function requires the buffer to be 16 byte aligned and > 16 bytes long. +// +//go:noescape +func vectorCrc32(crc uint32, poly uint32, p []byte) uint32 + +var archCastagnoliTable8 *slicing8Table + +func archInitCastagnoli() { + archCastagnoliTable8 = slicingMakeTable(Castagnoli) +} + +func archUpdateCastagnoli(crc uint32, p []byte) uint32 { + if len(p) >= 4*vecMinLen { + // If not aligned then process the initial unaligned bytes + + if uint64(uintptr(unsafe.Pointer(&p[0])))&uint64(vecAlignMask) != 0 { + align := uint64(uintptr(unsafe.Pointer(&p[0]))) & uint64(vecAlignMask) + newlen := vecMinLen - align + crc = ppc64SlicingUpdateBy8(crc, archCastagnoliTable8, p[:newlen]) + p = p[newlen:] + } + // p should be aligned now + aligned := len(p) & ^vecAlignMask + crc = vectorCrc32(crc, crcCast, p[:aligned]) + p = p[aligned:] + } + if len(p) == 0 { + return crc + } + return ppc64SlicingUpdateBy8(crc, archCastagnoliTable8, p) +} + +func archAvailableIEEE() bool { + return true +} +func archAvailableCastagnoli() bool { + return true +} + +var archIeeeTable8 *slicing8Table + +func archInitIEEE() { + // We still use slicing-by-8 for small buffers. + archIeeeTable8 = slicingMakeTable(IEEE) +} + +// archUpdateIEEE calculates the checksum of p using vectorizedIEEE. +func archUpdateIEEE(crc uint32, p []byte) uint32 { + + // Check if vector code should be used. If not aligned, then handle those + // first up to the aligned bytes. 
+ + if len(p) >= 4*vecMinLen { + if uint64(uintptr(unsafe.Pointer(&p[0])))&uint64(vecAlignMask) != 0 { + align := uint64(uintptr(unsafe.Pointer(&p[0]))) & uint64(vecAlignMask) + newlen := vecMinLen - align + crc = ppc64SlicingUpdateBy8(crc, archIeeeTable8, p[:newlen]) + p = p[newlen:] + } + aligned := len(p) & ^vecAlignMask + crc = vectorCrc32(crc, crcIEEE, p[:aligned]) + p = p[aligned:] + } + if len(p) == 0 { + return crc + } + return ppc64SlicingUpdateBy8(crc, archIeeeTable8, p) +} diff --git a/vendor/github.com/klauspost/crc32/crc32_ppc64le.s b/vendor/github.com/klauspost/crc32/crc32_ppc64le.s new file mode 100644 index 00000000000..87edef7053f --- /dev/null +++ b/vendor/github.com/klauspost/crc32/crc32_ppc64le.s @@ -0,0 +1,736 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// The vectorized implementation found below is a derived work +// from code written by Anton Blanchard found +// at https://github.com/antonblanchard/crc32-vpmsum. The original +// is dual licensed under GPL and Apache 2. As the copyright holder +// for the work, IBM has contributed this new work under +// the golang license. + +// Changes include porting to Go assembler with modifications for +// the Go ABI for ppc64le. + +#include "textflag.h" + +#define POWER8_OFFSET 132 + +#define off16 R16 +#define off32 R17 +#define off48 R18 +#define off64 R19 +#define off80 R20 +#define off96 R21 +#define off112 R22 + +#define const1 V24 +#define const2 V25 + +#define byteswap V26 +#define mask_32bit V27 +#define mask_64bit V28 +#define zeroes V29 + +#define MAX_SIZE 32*1024 +#define REFLECT + +TEXT ·ppc64SlicingUpdateBy8(SB), NOSPLIT|NOFRAME, $0-44 + MOVWZ crc+0(FP), R3 // incoming crc + MOVD table8+8(FP), R4 // *Table + MOVD p+16(FP), R5 + MOVD p_len+24(FP), R6 // p len + + CMP $0, R6 // len == 0? 
+ BNE start + MOVW R3, ret+40(FP) // return crc + RET + +start: + NOR R3, R3, R7 // ^crc + MOVWZ R7, R7 // 32 bits + CMP R6, $16 + MOVD R6, CTR + BLT short + SRAD $3, R6, R8 // 8 byte chunks + MOVD R8, CTR + +loop: + MOVWZ 0(R5), R8 // 0-3 bytes of p ?Endian? + MOVWZ 4(R5), R9 // 4-7 bytes of p + MOVD R4, R10 // &tab[0] + XOR R7, R8, R7 // crc ^= byte[0:3] + RLDICL $40, R9, $56, R17 // p[7] + SLD $2, R17, R17 // p[7]*4 + RLDICL $40, R7, $56, R8 // crc>>24 + SLD $2, R8, R8 // crc>>24*4 + RLDICL $48, R9, $56, R18 // p[6] + SLD $2, R18, R18 // p[6]*4 + MOVWZ (R10)(R17), R21 // tab[0][p[7]] + ADD $1024, R10, R10 // tab[1] + RLDICL $56, R9, $56, R19 // p[5] + SLD $2, R19, R19 // p[5]*4:1 + MOVWZ (R10)(R18), R22 // tab[1][p[6]] + ADD $1024, R10, R10 // tab[2] + XOR R21, R22, R21 // xor done R22 + CLRLSLDI $56, R9, $2, R20 + MOVWZ (R10)(R19), R23 // tab[2][p[5]] + ADD $1024, R10, R10 // &tab[3] + XOR R21, R23, R21 // xor done R23 + MOVWZ (R10)(R20), R24 // tab[3][p[4]] + ADD $1024, R10, R10 // &tab[4] + XOR R21, R24, R21 // xor done R24 + MOVWZ (R10)(R8), R25 // tab[4][crc>>24] + RLDICL $48, R7, $56, R24 // crc>>16&0xFF + XOR R21, R25, R21 // xor done R25 + ADD $1024, R10, R10 // &tab[5] + SLD $2, R24, R24 // crc>>16&0xFF*4 + MOVWZ (R10)(R24), R26 // tab[5][crc>>16&0xFF] + XOR R21, R26, R21 // xor done R26 + RLDICL $56, R7, $56, R25 // crc>>8 + ADD $1024, R10, R10 // &tab[6] + SLD $2, R25, R25 // crc>>8&FF*2 + MOVBZ R7, R26 // crc&0xFF + MOVWZ (R10)(R25), R27 // tab[6][crc>>8&0xFF] + ADD $1024, R10, R10 // &tab[7] + SLD $2, R26, R26 // crc&0xFF*2 + XOR R21, R27, R21 // xor done R27 + ADD $8, R5 // p = p[8:] + MOVWZ (R10)(R26), R28 // tab[7][crc&0xFF] + XOR R21, R28, R21 // xor done R28 + MOVWZ R21, R7 // crc for next round + BDNZ loop + ANDCC $7, R6, R8 // any leftover bytes + BEQ done // none --> done + MOVD R8, CTR // byte count + PCALIGN $16 // align short loop + +short: + MOVBZ 0(R5), R8 // get v + XOR R8, R7, R8 // byte(crc)^v -> R8 + RLDIC $2, R8, $54, R8 // rldicl 
r8,r8,2,22 + SRD $8, R7, R14 // crc>>8 + MOVWZ (R4)(R8), R10 + ADD $1, R5 + XOR R10, R14, R7 // loop crc in R7 + BDNZ short + +done: + NOR R7, R7, R7 // ^crc + MOVW R7, ret+40(FP) // return crc + RET + +#ifdef BYTESWAP_DATA +DATA ·byteswapcons+0(SB)/8, $0x0706050403020100 +DATA ·byteswapcons+8(SB)/8, $0x0f0e0d0c0b0a0908 + +GLOBL ·byteswapcons+0(SB), RODATA, $16 +#endif + +TEXT ·vectorCrc32(SB), NOSPLIT|NOFRAME, $0-36 + MOVWZ crc+0(FP), R3 // incoming crc + MOVWZ ctab+4(FP), R14 // crc poly id + MOVD p+8(FP), R4 + MOVD p_len+16(FP), R5 // p len + + // R3 = incoming crc + // R14 = constant table identifier + // R5 = address of bytes + // R6 = length of bytes + + // defines for index loads + + MOVD $16, off16 + MOVD $32, off32 + MOVD $48, off48 + MOVD $64, off64 + MOVD $80, off80 + MOVD $96, off96 + MOVD $112, off112 + MOVD $0, R15 + + MOVD R3, R10 // save initial crc + + NOR R3, R3, R3 // ^crc + MOVWZ R3, R3 // 32 bits + VXOR zeroes, zeroes, zeroes // clear the V reg + VSPLTISW $-1, V0 + VSLDOI $4, V29, V0, mask_32bit + VSLDOI $8, V29, V0, mask_64bit + + VXOR V8, V8, V8 + MTVSRD R3, VS40 // crc initial value VS40 = V8 + +#ifdef REFLECT + VSLDOI $8, zeroes, V8, V8 // or: VSLDOI V29,V8,V27,4 for top 32 bits? + +#else + VSLDOI $4, V8, zeroes, V8 + +#endif + +#ifdef BYTESWAP_DATA + MOVD $·byteswapcons(SB), R3 + LVX (R3), byteswap + +#endif + + CMPU R5, $256 // length of bytes + BLT short + + RLDICR $0, R5, $56, R6 // chunk to process + + // First step for larger sizes +l1: + MOVD $32768, R7 + MOVD R7, R9 + CMP R6, R7 // compare R6, R7 (MAX SIZE) + BGT top // less than MAX, just do remainder + MOVD R6, R7 + +top: + SUB R7, R6, R6 + + // mainloop does 128 bytes at a time + SRD $7, R7 + + // determine the offset into the constants table to start with. + // Each constant is 128 bytes, used against 16 bytes of data. 
+ SLD $4, R7, R8 + SRD $3, R9, R9 + SUB R8, R9, R8 + + // The last iteration is reduced in a separate step + ADD $-1, R7 + MOVD R7, CTR + + // Determine which constant table (depends on poly) + CMP R14, $1 + BNE castTable + MOVD $·IEEEConst(SB), R3 + BR startConst + +castTable: + MOVD $·CastConst(SB), R3 + +startConst: + ADD R3, R8, R3 // starting point in constants table + + VXOR V0, V0, V0 // clear the V regs + VXOR V1, V1, V1 + VXOR V2, V2, V2 + VXOR V3, V3, V3 + VXOR V4, V4, V4 + VXOR V5, V5, V5 + VXOR V6, V6, V6 + VXOR V7, V7, V7 + + LVX (R3), const1 // loading constant values + + CMP R15, $1 // Identify warm up pass + BEQ next + + // First warm up pass: load the bytes to process + LVX (R4), V16 + LVX (R4+off16), V17 + LVX (R4+off32), V18 + LVX (R4+off48), V19 + LVX (R4+off64), V20 + LVX (R4+off80), V21 + LVX (R4+off96), V22 + LVX (R4+off112), V23 + ADD $128, R4 // bump up to next 128 bytes in buffer + + VXOR V16, V8, V16 // xor in initial CRC in V8 + +next: + BC 18, 0, first_warm_up_done + + ADD $16, R3 // bump up to next constants + LVX (R3), const2 // table values + + VPMSUMD V16, const1, V8 // second warm up pass + LVX (R4), V16 // load from buffer + OR $0, R2, R2 + + VPMSUMD V17, const1, V9 // vpmsumd with constants + LVX (R4+off16), V17 // load next from buffer + OR $0, R2, R2 + + VPMSUMD V18, const1, V10 // vpmsumd with constants + LVX (R4+off32), V18 // load next from buffer + OR $0, R2, R2 + + VPMSUMD V19, const1, V11 // vpmsumd with constants + LVX (R4+off48), V19 // load next from buffer + OR $0, R2, R2 + + VPMSUMD V20, const1, V12 // vpmsumd with constants + LVX (R4+off64), V20 // load next from buffer + OR $0, R2, R2 + + VPMSUMD V21, const1, V13 // vpmsumd with constants + LVX (R4+off80), V21 // load next from buffer + OR $0, R2, R2 + + VPMSUMD V22, const1, V14 // vpmsumd with constants + LVX (R4+off96), V22 // load next from buffer + OR $0, R2, R2 + + VPMSUMD V23, const1, V15 // vpmsumd with constants + LVX (R4+off112), V23 // load next from 
buffer + + ADD $128, R4 // bump up to next 128 bytes in buffer + + BC 18, 0, first_cool_down + +cool_top: + LVX (R3), const1 // constants + ADD $16, R3 // inc to next constants + OR $0, R2, R2 + + VXOR V0, V8, V0 // xor in previous vpmsumd + VPMSUMD V16, const2, V8 // vpmsumd with constants + LVX (R4), V16 // buffer + OR $0, R2, R2 + + VXOR V1, V9, V1 // xor in previous + VPMSUMD V17, const2, V9 // vpmsumd with constants + LVX (R4+off16), V17 // next in buffer + OR $0, R2, R2 + + VXOR V2, V10, V2 // xor in previous + VPMSUMD V18, const2, V10 // vpmsumd with constants + LVX (R4+off32), V18 // next in buffer + OR $0, R2, R2 + + VXOR V3, V11, V3 // xor in previous + VPMSUMD V19, const2, V11 // vpmsumd with constants + LVX (R4+off48), V19 // next in buffer + LVX (R3), const2 // get next constant + OR $0, R2, R2 + + VXOR V4, V12, V4 // xor in previous + VPMSUMD V20, const1, V12 // vpmsumd with constants + LVX (R4+off64), V20 // next in buffer + OR $0, R2, R2 + + VXOR V5, V13, V5 // xor in previous + VPMSUMD V21, const1, V13 // vpmsumd with constants + LVX (R4+off80), V21 // next in buffer + OR $0, R2, R2 + + VXOR V6, V14, V6 // xor in previous + VPMSUMD V22, const1, V14 // vpmsumd with constants + LVX (R4+off96), V22 // next in buffer + OR $0, R2, R2 + + VXOR V7, V15, V7 // xor in previous + VPMSUMD V23, const1, V15 // vpmsumd with constants + LVX (R4+off112), V23 // next in buffer + + ADD $128, R4 // bump up buffer pointer + BDNZ cool_top // are we done? 
+ +first_cool_down: + + // load the constants + // xor in the previous value + // vpmsumd the result with constants + + LVX (R3), const1 + ADD $16, R3 + + VXOR V0, V8, V0 + VPMSUMD V16, const1, V8 + OR $0, R2, R2 + + VXOR V1, V9, V1 + VPMSUMD V17, const1, V9 + OR $0, R2, R2 + + VXOR V2, V10, V2 + VPMSUMD V18, const1, V10 + OR $0, R2, R2 + + VXOR V3, V11, V3 + VPMSUMD V19, const1, V11 + OR $0, R2, R2 + + VXOR V4, V12, V4 + VPMSUMD V20, const1, V12 + OR $0, R2, R2 + + VXOR V5, V13, V5 + VPMSUMD V21, const1, V13 + OR $0, R2, R2 + + VXOR V6, V14, V6 + VPMSUMD V22, const1, V14 + OR $0, R2, R2 + + VXOR V7, V15, V7 + VPMSUMD V23, const1, V15 + OR $0, R2, R2 + +second_cool_down: + + VXOR V0, V8, V0 + VXOR V1, V9, V1 + VXOR V2, V10, V2 + VXOR V3, V11, V3 + VXOR V4, V12, V4 + VXOR V5, V13, V5 + VXOR V6, V14, V6 + VXOR V7, V15, V7 + +#ifdef REFLECT + VSLDOI $4, V0, zeroes, V0 + VSLDOI $4, V1, zeroes, V1 + VSLDOI $4, V2, zeroes, V2 + VSLDOI $4, V3, zeroes, V3 + VSLDOI $4, V4, zeroes, V4 + VSLDOI $4, V5, zeroes, V5 + VSLDOI $4, V6, zeroes, V6 + VSLDOI $4, V7, zeroes, V7 + +#endif + + LVX (R4), V8 + LVX (R4+off16), V9 + LVX (R4+off32), V10 + LVX (R4+off48), V11 + LVX (R4+off64), V12 + LVX (R4+off80), V13 + LVX (R4+off96), V14 + LVX (R4+off112), V15 + + ADD $128, R4 + + VXOR V0, V8, V16 + VXOR V1, V9, V17 + VXOR V2, V10, V18 + VXOR V3, V11, V19 + VXOR V4, V12, V20 + VXOR V5, V13, V21 + VXOR V6, V14, V22 + VXOR V7, V15, V23 + + MOVD $1, R15 + CMP $0, R6 + ADD $128, R6 + + BNE l1 + ANDCC $127, R5 + SUBC R5, $128, R6 + ADD R3, R6, R3 + + SRD $4, R5, R7 + MOVD R7, CTR + LVX (R3), V0 + LVX (R3+off16), V1 + LVX (R3+off32), V2 + LVX (R3+off48), V3 + LVX (R3+off64), V4 + LVX (R3+off80), V5 + LVX (R3+off96), V6 + LVX (R3+off112), V7 + + ADD $128, R3 + + VPMSUMW V16, V0, V0 + VPMSUMW V17, V1, V1 + VPMSUMW V18, V2, V2 + VPMSUMW V19, V3, V3 + VPMSUMW V20, V4, V4 + VPMSUMW V21, V5, V5 + VPMSUMW V22, V6, V6 + VPMSUMW V23, V7, V7 + + // now reduce the tail + + CMP $0, R7 + BEQ next1 + + LVX 
(R4), V16 + LVX (R3), V17 + VPMSUMW V16, V17, V16 + VXOR V0, V16, V0 + BC 18, 0, next1 + + LVX (R4+off16), V16 + LVX (R3+off16), V17 + VPMSUMW V16, V17, V16 + VXOR V0, V16, V0 + BC 18, 0, next1 + + LVX (R4+off32), V16 + LVX (R3+off32), V17 + VPMSUMW V16, V17, V16 + VXOR V0, V16, V0 + BC 18, 0, next1 + + LVX (R4+off48), V16 + LVX (R3+off48), V17 + VPMSUMW V16, V17, V16 + VXOR V0, V16, V0 + BC 18, 0, next1 + + LVX (R4+off64), V16 + LVX (R3+off64), V17 + VPMSUMW V16, V17, V16 + VXOR V0, V16, V0 + BC 18, 0, next1 + + LVX (R4+off80), V16 + LVX (R3+off80), V17 + VPMSUMW V16, V17, V16 + VXOR V0, V16, V0 + BC 18, 0, next1 + + LVX (R4+off96), V16 + LVX (R3+off96), V17 + VPMSUMW V16, V17, V16 + VXOR V0, V16, V0 + +next1: + VXOR V0, V1, V0 + VXOR V2, V3, V2 + VXOR V4, V5, V4 + VXOR V6, V7, V6 + VXOR V0, V2, V0 + VXOR V4, V6, V4 + VXOR V0, V4, V0 + +barrett_reduction: + + CMP R14, $1 + BNE barcstTable + MOVD $·IEEEBarConst(SB), R3 + BR startbarConst + +barcstTable: + MOVD $·CastBarConst(SB), R3 + +startbarConst: + LVX (R3), const1 + LVX (R3+off16), const2 + + VSLDOI $8, V0, V0, V1 + VXOR V0, V1, V0 + +#ifdef REFLECT + VSPLTISB $1, V1 + VSL V0, V1, V0 + +#endif + + VAND V0, mask_64bit, V0 + +#ifndef REFLECT + + VPMSUMD V0, const1, V1 + VSLDOI $8, zeroes, V1, V1 + VPMSUMD V1, const2, V1 + VXOR V0, V1, V0 + VSLDOI $8, V0, zeroes, V0 + +#else + + VAND V0, mask_32bit, V1 + VPMSUMD V1, const1, V1 + VAND V1, mask_32bit, V1 + VPMSUMD V1, const2, V1 + VXOR V0, V1, V0 + VSLDOI $4, V0, zeroes, V0 + +#endif + + MFVSRD VS32, R3 // VS32 = V0 + + NOR R3, R3, R3 // return ^crc + MOVW R3, ret+32(FP) + RET + +first_warm_up_done: + + LVX (R3), const1 + ADD $16, R3 + + VPMSUMD V16, const1, V8 + VPMSUMD V17, const1, V9 + VPMSUMD V18, const1, V10 + VPMSUMD V19, const1, V11 + VPMSUMD V20, const1, V12 + VPMSUMD V21, const1, V13 + VPMSUMD V22, const1, V14 + VPMSUMD V23, const1, V15 + + BR second_cool_down + +short: + CMP $0, R5 + BEQ zero + + // compute short constants + + CMP R14, $1 + BNE 
castshTable + MOVD $·IEEEConst(SB), R3 + ADD $4080, R3 + BR startshConst + +castshTable: + MOVD $·CastConst(SB), R3 + ADD $4080, R3 + +startshConst: + SUBC R5, $256, R6 // sub from 256 + ADD R3, R6, R3 + + // calculate where to start + + SRD $4, R5, R7 + MOVD R7, CTR + + VXOR V19, V19, V19 + VXOR V20, V20, V20 + + LVX (R4), V0 + LVX (R3), V16 + VXOR V0, V8, V0 + VPMSUMW V0, V16, V0 + BC 18, 0, v0 + + LVX (R4+off16), V1 + LVX (R3+off16), V17 + VPMSUMW V1, V17, V1 + BC 18, 0, v1 + + LVX (R4+off32), V2 + LVX (R3+off32), V16 + VPMSUMW V2, V16, V2 + BC 18, 0, v2 + + LVX (R4+off48), V3 + LVX (R3+off48), V17 + VPMSUMW V3, V17, V3 + BC 18, 0, v3 + + LVX (R4+off64), V4 + LVX (R3+off64), V16 + VPMSUMW V4, V16, V4 + BC 18, 0, v4 + + LVX (R4+off80), V5 + LVX (R3+off80), V17 + VPMSUMW V5, V17, V5 + BC 18, 0, v5 + + LVX (R4+off96), V6 + LVX (R3+off96), V16 + VPMSUMW V6, V16, V6 + BC 18, 0, v6 + + LVX (R4+off112), V7 + LVX (R3+off112), V17 + VPMSUMW V7, V17, V7 + BC 18, 0, v7 + + ADD $128, R3 + ADD $128, R4 + + LVX (R4), V8 + LVX (R3), V16 + VPMSUMW V8, V16, V8 + BC 18, 0, v8 + + LVX (R4+off16), V9 + LVX (R3+off16), V17 + VPMSUMW V9, V17, V9 + BC 18, 0, v9 + + LVX (R4+off32), V10 + LVX (R3+off32), V16 + VPMSUMW V10, V16, V10 + BC 18, 0, v10 + + LVX (R4+off48), V11 + LVX (R3+off48), V17 + VPMSUMW V11, V17, V11 + BC 18, 0, v11 + + LVX (R4+off64), V12 + LVX (R3+off64), V16 + VPMSUMW V12, V16, V12 + BC 18, 0, v12 + + LVX (R4+off80), V13 + LVX (R3+off80), V17 + VPMSUMW V13, V17, V13 + BC 18, 0, v13 + + LVX (R4+off96), V14 + LVX (R3+off96), V16 + VPMSUMW V14, V16, V14 + BC 18, 0, v14 + + LVX (R4+off112), V15 + LVX (R3+off112), V17 + VPMSUMW V15, V17, V15 + + VXOR V19, V15, V19 + +v14: + VXOR V20, V14, V20 + +v13: + VXOR V19, V13, V19 + +v12: + VXOR V20, V12, V20 + +v11: + VXOR V19, V11, V19 + +v10: + VXOR V20, V10, V20 + +v9: + VXOR V19, V9, V19 + +v8: + VXOR V20, V8, V20 + +v7: + VXOR V19, V7, V19 + +v6: + VXOR V20, V6, V20 + +v5: + VXOR V19, V5, V19 + +v4: + VXOR V20, V4, V20 + +v3: 
+ VXOR V19, V3, V19 + +v2: + VXOR V20, V2, V20 + +v1: + VXOR V19, V1, V19 + +v0: + VXOR V20, V0, V20 + + VXOR V19, V20, V0 + + BR barrett_reduction + +zero: + // This case is the original crc, so just return it + MOVW R10, ret+32(FP) + RET diff --git a/vendor/github.com/klauspost/crc32/crc32_s390x.go b/vendor/github.com/klauspost/crc32/crc32_s390x.go new file mode 100644 index 00000000000..67b4ea7d989 --- /dev/null +++ b/vendor/github.com/klauspost/crc32/crc32_s390x.go @@ -0,0 +1,91 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package crc32 + +import "golang.org/x/sys/cpu" + +const ( + vxMinLen = 64 + vxAlignMask = 15 // align to 16 bytes +) + +// hasVX reports whether the machine has the z/Architecture +// vector facility installed and enabled. +var hasVX = cpu.S390X.HasVX + +// vectorizedCastagnoli implements CRC32 using vector instructions. +// It is defined in crc32_s390x.s. +// +//go:noescape +func vectorizedCastagnoli(crc uint32, p []byte) uint32 + +// vectorizedIEEE implements CRC32 using vector instructions. +// It is defined in crc32_s390x.s. +// +//go:noescape +func vectorizedIEEE(crc uint32, p []byte) uint32 + +func archAvailableCastagnoli() bool { + return hasVX +} + +var archCastagnoliTable8 *slicing8Table + +func archInitCastagnoli() { + if !hasVX { + panic("not available") + } + // We still use slicing-by-8 for small buffers. + archCastagnoliTable8 = slicingMakeTable(Castagnoli) +} + +// archUpdateCastagnoli calculates the checksum of p using +// vectorizedCastagnoli. +func archUpdateCastagnoli(crc uint32, p []byte) uint32 { + if !hasVX { + panic("not available") + } + // Use vectorized function if data length is above threshold. 
+ if len(p) >= vxMinLen { + aligned := len(p) & ^vxAlignMask + crc = vectorizedCastagnoli(crc, p[:aligned]) + p = p[aligned:] + } + if len(p) == 0 { + return crc + } + return slicingUpdate(crc, archCastagnoliTable8, p) +} + +func archAvailableIEEE() bool { + return hasVX +} + +var archIeeeTable8 *slicing8Table + +func archInitIEEE() { + if !hasVX { + panic("not available") + } + // We still use slicing-by-8 for small buffers. + archIeeeTable8 = slicingMakeTable(IEEE) +} + +// archUpdateIEEE calculates the checksum of p using vectorizedIEEE. +func archUpdateIEEE(crc uint32, p []byte) uint32 { + if !hasVX { + panic("not available") + } + // Use vectorized function if data length is above threshold. + if len(p) >= vxMinLen { + aligned := len(p) & ^vxAlignMask + crc = vectorizedIEEE(crc, p[:aligned]) + p = p[aligned:] + } + if len(p) == 0 { + return crc + } + return slicingUpdate(crc, archIeeeTable8, p) +} diff --git a/vendor/github.com/klauspost/crc32/crc32_s390x.s b/vendor/github.com/klauspost/crc32/crc32_s390x.s new file mode 100644 index 00000000000..aefda50e1f5 --- /dev/null +++ b/vendor/github.com/klauspost/crc32/crc32_s390x.s @@ -0,0 +1,225 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +// Vector register range containing CRC-32 constants + +#define CONST_PERM_LE2BE V9 +#define CONST_R2R1 V10 +#define CONST_R4R3 V11 +#define CONST_R5 V12 +#define CONST_RU_POLY V13 +#define CONST_CRC_POLY V14 + +// The CRC-32 constant block contains reduction constants to fold and +// process particular chunks of the input data stream in parallel. +// +// Note that the constant definitions below are extended in order to compute +// intermediate results with a single VECTOR GALOIS FIELD MULTIPLY instruction. 
+// The rightmost doubleword can be 0 to prevent contribution to the result or +// can be multiplied by 1 to perform an XOR without the need for a separate +// VECTOR EXCLUSIVE OR instruction. +// +// The polynomials used are bit-reflected: +// +// IEEE: P'(x) = 0x0edb88320 +// Castagnoli: P'(x) = 0x082f63b78 + +// IEEE polynomial constants +DATA ·crclecons+0(SB)/8, $0x0F0E0D0C0B0A0908 // LE-to-BE mask +DATA ·crclecons+8(SB)/8, $0x0706050403020100 +DATA ·crclecons+16(SB)/8, $0x00000001c6e41596 // R2 +DATA ·crclecons+24(SB)/8, $0x0000000154442bd4 // R1 +DATA ·crclecons+32(SB)/8, $0x00000000ccaa009e // R4 +DATA ·crclecons+40(SB)/8, $0x00000001751997d0 // R3 +DATA ·crclecons+48(SB)/8, $0x0000000000000000 +DATA ·crclecons+56(SB)/8, $0x0000000163cd6124 // R5 +DATA ·crclecons+64(SB)/8, $0x0000000000000000 +DATA ·crclecons+72(SB)/8, $0x00000001F7011641 // u' +DATA ·crclecons+80(SB)/8, $0x0000000000000000 +DATA ·crclecons+88(SB)/8, $0x00000001DB710641 // P'(x) << 1 + +GLOBL ·crclecons(SB), RODATA, $144 + +// Castagonli Polynomial constants +DATA ·crcclecons+0(SB)/8, $0x0F0E0D0C0B0A0908 // LE-to-BE mask +DATA ·crcclecons+8(SB)/8, $0x0706050403020100 +DATA ·crcclecons+16(SB)/8, $0x000000009e4addf8 // R2 +DATA ·crcclecons+24(SB)/8, $0x00000000740eef02 // R1 +DATA ·crcclecons+32(SB)/8, $0x000000014cd00bd6 // R4 +DATA ·crcclecons+40(SB)/8, $0x00000000f20c0dfe // R3 +DATA ·crcclecons+48(SB)/8, $0x0000000000000000 +DATA ·crcclecons+56(SB)/8, $0x00000000dd45aab8 // R5 +DATA ·crcclecons+64(SB)/8, $0x0000000000000000 +DATA ·crcclecons+72(SB)/8, $0x00000000dea713f1 // u' +DATA ·crcclecons+80(SB)/8, $0x0000000000000000 +DATA ·crcclecons+88(SB)/8, $0x0000000105ec76f0 // P'(x) << 1 + +GLOBL ·crcclecons(SB), RODATA, $144 + +// The CRC-32 function(s) use these calling conventions: +// +// Parameters: +// +// R2: Initial CRC value, typically ~0; and final CRC (return) value. +// R3: Input buffer pointer, performance might be improved if the +// buffer is on a doubleword boundary. 
+// R4: Length of the buffer, must be 64 bytes or greater. +// +// Register usage: +// +// R5: CRC-32 constant pool base pointer. +// V0: Initial CRC value and intermediate constants and results. +// V1..V4: Data for CRC computation. +// V5..V8: Next data chunks that are fetched from the input buffer. +// +// V9..V14: CRC-32 constants. + +// func vectorizedIEEE(crc uint32, p []byte) uint32 +TEXT ·vectorizedIEEE(SB), NOSPLIT, $0 + MOVWZ crc+0(FP), R2 // R2 stores the CRC value + MOVD p+8(FP), R3 // data pointer + MOVD p_len+16(FP), R4 // len(p) + + MOVD $·crclecons(SB), R5 + BR vectorizedBody<>(SB) + +// func vectorizedCastagnoli(crc uint32, p []byte) uint32 +TEXT ·vectorizedCastagnoli(SB), NOSPLIT, $0 + MOVWZ crc+0(FP), R2 // R2 stores the CRC value + MOVD p+8(FP), R3 // data pointer + MOVD p_len+16(FP), R4 // len(p) + + // R5: crc-32 constant pool base pointer, constant is used to reduce crc + MOVD $·crcclecons(SB), R5 + BR vectorizedBody<>(SB) + +TEXT vectorizedBody<>(SB), NOSPLIT, $0 + XOR $0xffffffff, R2 // NOTW R2 + VLM 0(R5), CONST_PERM_LE2BE, CONST_CRC_POLY + + // Load the initial CRC value into the rightmost word of V0 + VZERO V0 + VLVGF $3, R2, V0 + + // Crash if the input size is less than 64-bytes. 
+ CMP R4, $64 + BLT crash + + // Load a 64-byte data chunk and XOR with CRC + VLM 0(R3), V1, V4 // 64-bytes into V1..V4 + + // Reflect the data if the CRC operation is in the bit-reflected domain + VPERM V1, V1, CONST_PERM_LE2BE, V1 + VPERM V2, V2, CONST_PERM_LE2BE, V2 + VPERM V3, V3, CONST_PERM_LE2BE, V3 + VPERM V4, V4, CONST_PERM_LE2BE, V4 + + VX V0, V1, V1 // V1 ^= CRC + ADD $64, R3 // BUF = BUF + 64 + ADD $(-64), R4 + + // Check remaining buffer size and jump to proper folding method + CMP R4, $64 + BLT less_than_64bytes + +fold_64bytes_loop: + // Load the next 64-byte data chunk into V5 to V8 + VLM 0(R3), V5, V8 + VPERM V5, V5, CONST_PERM_LE2BE, V5 + VPERM V6, V6, CONST_PERM_LE2BE, V6 + VPERM V7, V7, CONST_PERM_LE2BE, V7 + VPERM V8, V8, CONST_PERM_LE2BE, V8 + + // Perform a GF(2) multiplication of the doublewords in V1 with + // the reduction constants in V0. The intermediate result is + // then folded (accumulated) with the next data chunk in V5 and + // stored in V1. Repeat this step for the register contents + // in V2, V3, and V4 respectively. 
+ + VGFMAG CONST_R2R1, V1, V5, V1 + VGFMAG CONST_R2R1, V2, V6, V2 + VGFMAG CONST_R2R1, V3, V7, V3 + VGFMAG CONST_R2R1, V4, V8, V4 + + // Adjust buffer pointer and length for next loop + ADD $64, R3 // BUF = BUF + 64 + ADD $(-64), R4 // LEN = LEN - 64 + + CMP R4, $64 + BGE fold_64bytes_loop + +less_than_64bytes: + // Fold V1 to V4 into a single 128-bit value in V1 + VGFMAG CONST_R4R3, V1, V2, V1 + VGFMAG CONST_R4R3, V1, V3, V1 + VGFMAG CONST_R4R3, V1, V4, V1 + + // Check whether to continue with 64-bit folding + CMP R4, $16 + BLT final_fold + +fold_16bytes_loop: + VL 0(R3), V2 // Load next data chunk + VPERM V2, V2, CONST_PERM_LE2BE, V2 + + VGFMAG CONST_R4R3, V1, V2, V1 // Fold next data chunk + + // Adjust buffer pointer and size for folding next data chunk + ADD $16, R3 + ADD $-16, R4 + + // Process remaining data chunks + CMP R4, $16 + BGE fold_16bytes_loop + +final_fold: + VLEIB $7, $0x40, V9 + VSRLB V9, CONST_R4R3, V0 + VLEIG $0, $1, V0 + + VGFMG V0, V1, V1 + + VLEIB $7, $0x20, V9 // Shift by words + VSRLB V9, V1, V2 // Store remaining bits in V2 + VUPLLF V1, V1 // Split rightmost doubleword + VGFMAG CONST_R5, V1, V2, V1 // V1 = (V1 * R5) XOR V2 + + // The input values to the Barret reduction are the degree-63 polynomial + // in V1 (R(x)), degree-32 generator polynomial, and the reduction + // constant u. The Barret reduction result is the CRC value of R(x) mod + // P(x). + // + // The Barret reduction algorithm is defined as: + // + // 1. T1(x) = floor( R(x) / x^32 ) GF2MUL u + // 2. T2(x) = floor( T1(x) / x^32 ) GF2MUL P(x) + // 3. C(x) = R(x) XOR T2(x) mod x^32 + // + // Note: To compensate the division by x^32, use the vector unpack + // instruction to move the leftmost word into the leftmost doubleword + // of the vector register. The rightmost doubleword is multiplied + // with zero to not contribute to the intermediate results. 
+ + // T1(x) = floor( R(x) / x^32 ) GF2MUL u + VUPLLF V1, V2 + VGFMG CONST_RU_POLY, V2, V2 + + // Compute the GF(2) product of the CRC polynomial in VO with T1(x) in + // V2 and XOR the intermediate result, T2(x), with the value in V1. + // The final result is in the rightmost word of V2. + + VUPLLF V2, V2 + VGFMAG CONST_CRC_POLY, V2, V1, V2 + +done: + VLGVF $2, V2, R2 + XOR $0xffffffff, R2 // NOTW R2 + MOVWZ R2, ret + 32(FP) + RET + +crash: + MOVD $0, (R0) // input size is less than 64-bytes + diff --git a/vendor/github.com/klauspost/crc32/crc32_table_ppc64le.s b/vendor/github.com/klauspost/crc32/crc32_table_ppc64le.s new file mode 100644 index 00000000000..1f3c1efda7f --- /dev/null +++ b/vendor/github.com/klauspost/crc32/crc32_table_ppc64le.s @@ -0,0 +1,3285 @@ +// Code generated by "go run gen_const_ppc64le.go"; DO NOT EDIT. + +#include "textflag.h" + +// Reduce 262144 kbits to 1024 bits +// x^261184 mod p(x), x^261120 mod p(x) +DATA ·IEEEConst+0(SB)/8, $0x0000000099ea94a8 +DATA ·IEEEConst+8(SB)/8, $0x00000001651797d2 + +// x^260160 mod p(x), x^260096 mod p(x) +DATA ·IEEEConst+16(SB)/8, $0x00000000945a8420 +DATA ·IEEEConst+24(SB)/8, $0x0000000021e0d56c + +// x^259136 mod p(x), x^259072 mod p(x) +DATA ·IEEEConst+32(SB)/8, $0x0000000030762706 +DATA ·IEEEConst+40(SB)/8, $0x000000000f95ecaa + +// x^258112 mod p(x), x^258048 mod p(x) +DATA ·IEEEConst+48(SB)/8, $0x00000001a52fc582 +DATA ·IEEEConst+56(SB)/8, $0x00000001ebd224ac + +// x^257088 mod p(x), x^257024 mod p(x) +DATA ·IEEEConst+64(SB)/8, $0x00000001a4a7167a +DATA ·IEEEConst+72(SB)/8, $0x000000000ccb97ca + +// x^256064 mod p(x), x^256000 mod p(x) +DATA ·IEEEConst+80(SB)/8, $0x000000000c18249a +DATA ·IEEEConst+88(SB)/8, $0x00000001006ec8a8 + +// x^255040 mod p(x), x^254976 mod p(x) +DATA ·IEEEConst+96(SB)/8, $0x00000000a924ae7c +DATA ·IEEEConst+104(SB)/8, $0x000000014f58f196 + +// x^254016 mod p(x), x^253952 mod p(x) +DATA ·IEEEConst+112(SB)/8, $0x00000001e12ccc12 +DATA ·IEEEConst+120(SB)/8, $0x00000001a7192ca6 
+ +// x^252992 mod p(x), x^252928 mod p(x) +DATA ·IEEEConst+128(SB)/8, $0x00000000a0b9d4ac +DATA ·IEEEConst+136(SB)/8, $0x000000019a64bab2 + +// x^251968 mod p(x), x^251904 mod p(x) +DATA ·IEEEConst+144(SB)/8, $0x0000000095e8ddfe +DATA ·IEEEConst+152(SB)/8, $0x0000000014f4ed2e + +// x^250944 mod p(x), x^250880 mod p(x) +DATA ·IEEEConst+160(SB)/8, $0x00000000233fddc4 +DATA ·IEEEConst+168(SB)/8, $0x000000011092b6a2 + +// x^249920 mod p(x), x^249856 mod p(x) +DATA ·IEEEConst+176(SB)/8, $0x00000001b4529b62 +DATA ·IEEEConst+184(SB)/8, $0x00000000c8a1629c + +// x^248896 mod p(x), x^248832 mod p(x) +DATA ·IEEEConst+192(SB)/8, $0x00000001a7fa0e64 +DATA ·IEEEConst+200(SB)/8, $0x000000017bf32e8e + +// x^247872 mod p(x), x^247808 mod p(x) +DATA ·IEEEConst+208(SB)/8, $0x00000001b5334592 +DATA ·IEEEConst+216(SB)/8, $0x00000001f8cc6582 + +// x^246848 mod p(x), x^246784 mod p(x) +DATA ·IEEEConst+224(SB)/8, $0x000000011f8ee1b4 +DATA ·IEEEConst+232(SB)/8, $0x000000008631ddf0 + +// x^245824 mod p(x), x^245760 mod p(x) +DATA ·IEEEConst+240(SB)/8, $0x000000006252e632 +DATA ·IEEEConst+248(SB)/8, $0x000000007e5a76d0 + +// x^244800 mod p(x), x^244736 mod p(x) +DATA ·IEEEConst+256(SB)/8, $0x00000000ab973e84 +DATA ·IEEEConst+264(SB)/8, $0x000000002b09b31c + +// x^243776 mod p(x), x^243712 mod p(x) +DATA ·IEEEConst+272(SB)/8, $0x000000007734f5ec +DATA ·IEEEConst+280(SB)/8, $0x00000001b2df1f84 + +// x^242752 mod p(x), x^242688 mod p(x) +DATA ·IEEEConst+288(SB)/8, $0x000000007c547798 +DATA ·IEEEConst+296(SB)/8, $0x00000001d6f56afc + +// x^241728 mod p(x), x^241664 mod p(x) +DATA ·IEEEConst+304(SB)/8, $0x000000007ec40210 +DATA ·IEEEConst+312(SB)/8, $0x00000001b9b5e70c + +// x^240704 mod p(x), x^240640 mod p(x) +DATA ·IEEEConst+320(SB)/8, $0x00000001ab1695a8 +DATA ·IEEEConst+328(SB)/8, $0x0000000034b626d2 + +// x^239680 mod p(x), x^239616 mod p(x) +DATA ·IEEEConst+336(SB)/8, $0x0000000090494bba +DATA ·IEEEConst+344(SB)/8, $0x000000014c53479a + +// x^238656 mod p(x), x^238592 mod p(x) +DATA 
·IEEEConst+352(SB)/8, $0x00000001123fb816 +DATA ·IEEEConst+360(SB)/8, $0x00000001a6d179a4 + +// x^237632 mod p(x), x^237568 mod p(x) +DATA ·IEEEConst+368(SB)/8, $0x00000001e188c74c +DATA ·IEEEConst+376(SB)/8, $0x000000015abd16b4 + +// x^236608 mod p(x), x^236544 mod p(x) +DATA ·IEEEConst+384(SB)/8, $0x00000001c2d3451c +DATA ·IEEEConst+392(SB)/8, $0x00000000018f9852 + +// x^235584 mod p(x), x^235520 mod p(x) +DATA ·IEEEConst+400(SB)/8, $0x00000000f55cf1ca +DATA ·IEEEConst+408(SB)/8, $0x000000001fb3084a + +// x^234560 mod p(x), x^234496 mod p(x) +DATA ·IEEEConst+416(SB)/8, $0x00000001a0531540 +DATA ·IEEEConst+424(SB)/8, $0x00000000c53dfb04 + +// x^233536 mod p(x), x^233472 mod p(x) +DATA ·IEEEConst+432(SB)/8, $0x0000000132cd7ebc +DATA ·IEEEConst+440(SB)/8, $0x00000000e10c9ad6 + +// x^232512 mod p(x), x^232448 mod p(x) +DATA ·IEEEConst+448(SB)/8, $0x0000000073ab7f36 +DATA ·IEEEConst+456(SB)/8, $0x0000000025aa994a + +// x^231488 mod p(x), x^231424 mod p(x) +DATA ·IEEEConst+464(SB)/8, $0x0000000041aed1c2 +DATA ·IEEEConst+472(SB)/8, $0x00000000fa3a74c4 + +// x^230464 mod p(x), x^230400 mod p(x) +DATA ·IEEEConst+480(SB)/8, $0x0000000136c53800 +DATA ·IEEEConst+488(SB)/8, $0x0000000033eb3f40 + +// x^229440 mod p(x), x^229376 mod p(x) +DATA ·IEEEConst+496(SB)/8, $0x0000000126835a30 +DATA ·IEEEConst+504(SB)/8, $0x000000017193f296 + +// x^228416 mod p(x), x^228352 mod p(x) +DATA ·IEEEConst+512(SB)/8, $0x000000006241b502 +DATA ·IEEEConst+520(SB)/8, $0x0000000043f6c86a + +// x^227392 mod p(x), x^227328 mod p(x) +DATA ·IEEEConst+528(SB)/8, $0x00000000d5196ad4 +DATA ·IEEEConst+536(SB)/8, $0x000000016b513ec6 + +// x^226368 mod p(x), x^226304 mod p(x) +DATA ·IEEEConst+544(SB)/8, $0x000000009cfa769a +DATA ·IEEEConst+552(SB)/8, $0x00000000c8f25b4e + +// x^225344 mod p(x), x^225280 mod p(x) +DATA ·IEEEConst+560(SB)/8, $0x00000000920e5df4 +DATA ·IEEEConst+568(SB)/8, $0x00000001a45048ec + +// x^224320 mod p(x), x^224256 mod p(x) +DATA ·IEEEConst+576(SB)/8, $0x0000000169dc310e +DATA 
·IEEEConst+584(SB)/8, $0x000000000c441004 + +// x^223296 mod p(x), x^223232 mod p(x) +DATA ·IEEEConst+592(SB)/8, $0x0000000009fc331c +DATA ·IEEEConst+600(SB)/8, $0x000000000e17cad6 + +// x^222272 mod p(x), x^222208 mod p(x) +DATA ·IEEEConst+608(SB)/8, $0x000000010d94a81e +DATA ·IEEEConst+616(SB)/8, $0x00000001253ae964 + +// x^221248 mod p(x), x^221184 mod p(x) +DATA ·IEEEConst+624(SB)/8, $0x0000000027a20ab2 +DATA ·IEEEConst+632(SB)/8, $0x00000001d7c88ebc + +// x^220224 mod p(x), x^220160 mod p(x) +DATA ·IEEEConst+640(SB)/8, $0x0000000114f87504 +DATA ·IEEEConst+648(SB)/8, $0x00000001e7ca913a + +// x^219200 mod p(x), x^219136 mod p(x) +DATA ·IEEEConst+656(SB)/8, $0x000000004b076d96 +DATA ·IEEEConst+664(SB)/8, $0x0000000033ed078a + +// x^218176 mod p(x), x^218112 mod p(x) +DATA ·IEEEConst+672(SB)/8, $0x00000000da4d1e74 +DATA ·IEEEConst+680(SB)/8, $0x00000000e1839c78 + +// x^217152 mod p(x), x^217088 mod p(x) +DATA ·IEEEConst+688(SB)/8, $0x000000001b81f672 +DATA ·IEEEConst+696(SB)/8, $0x00000001322b267e + +// x^216128 mod p(x), x^216064 mod p(x) +DATA ·IEEEConst+704(SB)/8, $0x000000009367c988 +DATA ·IEEEConst+712(SB)/8, $0x00000000638231b6 + +// x^215104 mod p(x), x^215040 mod p(x) +DATA ·IEEEConst+720(SB)/8, $0x00000001717214ca +DATA ·IEEEConst+728(SB)/8, $0x00000001ee7f16f4 + +// x^214080 mod p(x), x^214016 mod p(x) +DATA ·IEEEConst+736(SB)/8, $0x000000009f47d820 +DATA ·IEEEConst+744(SB)/8, $0x0000000117d9924a + +// x^213056 mod p(x), x^212992 mod p(x) +DATA ·IEEEConst+752(SB)/8, $0x000000010d9a47d2 +DATA ·IEEEConst+760(SB)/8, $0x00000000e1a9e0c4 + +// x^212032 mod p(x), x^211968 mod p(x) +DATA ·IEEEConst+768(SB)/8, $0x00000000a696c58c +DATA ·IEEEConst+776(SB)/8, $0x00000001403731dc + +// x^211008 mod p(x), x^210944 mod p(x) +DATA ·IEEEConst+784(SB)/8, $0x000000002aa28ec6 +DATA ·IEEEConst+792(SB)/8, $0x00000001a5ea9682 + +// x^209984 mod p(x), x^209920 mod p(x) +DATA ·IEEEConst+800(SB)/8, $0x00000001fe18fd9a +DATA ·IEEEConst+808(SB)/8, $0x0000000101c5c578 + +// 
x^208960 mod p(x), x^208896 mod p(x) +DATA ·IEEEConst+816(SB)/8, $0x000000019d4fc1ae +DATA ·IEEEConst+824(SB)/8, $0x00000000dddf6494 + +// x^207936 mod p(x), x^207872 mod p(x) +DATA ·IEEEConst+832(SB)/8, $0x00000001ba0e3dea +DATA ·IEEEConst+840(SB)/8, $0x00000000f1c3db28 + +// x^206912 mod p(x), x^206848 mod p(x) +DATA ·IEEEConst+848(SB)/8, $0x0000000074b59a5e +DATA ·IEEEConst+856(SB)/8, $0x000000013112fb9c + +// x^205888 mod p(x), x^205824 mod p(x) +DATA ·IEEEConst+864(SB)/8, $0x00000000f2b5ea98 +DATA ·IEEEConst+872(SB)/8, $0x00000000b680b906 + +// x^204864 mod p(x), x^204800 mod p(x) +DATA ·IEEEConst+880(SB)/8, $0x0000000187132676 +DATA ·IEEEConst+888(SB)/8, $0x000000001a282932 + +// x^203840 mod p(x), x^203776 mod p(x) +DATA ·IEEEConst+896(SB)/8, $0x000000010a8c6ad4 +DATA ·IEEEConst+904(SB)/8, $0x0000000089406e7e + +// x^202816 mod p(x), x^202752 mod p(x) +DATA ·IEEEConst+912(SB)/8, $0x00000001e21dfe70 +DATA ·IEEEConst+920(SB)/8, $0x00000001def6be8c + +// x^201792 mod p(x), x^201728 mod p(x) +DATA ·IEEEConst+928(SB)/8, $0x00000001da0050e4 +DATA ·IEEEConst+936(SB)/8, $0x0000000075258728 + +// x^200768 mod p(x), x^200704 mod p(x) +DATA ·IEEEConst+944(SB)/8, $0x00000000772172ae +DATA ·IEEEConst+952(SB)/8, $0x000000019536090a + +// x^199744 mod p(x), x^199680 mod p(x) +DATA ·IEEEConst+960(SB)/8, $0x00000000e47724aa +DATA ·IEEEConst+968(SB)/8, $0x00000000f2455bfc + +// x^198720 mod p(x), x^198656 mod p(x) +DATA ·IEEEConst+976(SB)/8, $0x000000003cd63ac4 +DATA ·IEEEConst+984(SB)/8, $0x000000018c40baf4 + +// x^197696 mod p(x), x^197632 mod p(x) +DATA ·IEEEConst+992(SB)/8, $0x00000001bf47d352 +DATA ·IEEEConst+1000(SB)/8, $0x000000004cd390d4 + +// x^196672 mod p(x), x^196608 mod p(x) +DATA ·IEEEConst+1008(SB)/8, $0x000000018dc1d708 +DATA ·IEEEConst+1016(SB)/8, $0x00000001e4ece95a + +// x^195648 mod p(x), x^195584 mod p(x) +DATA ·IEEEConst+1024(SB)/8, $0x000000002d4620a4 +DATA ·IEEEConst+1032(SB)/8, $0x000000001a3ee918 + +// x^194624 mod p(x), x^194560 mod p(x) +DATA 
·IEEEConst+1040(SB)/8, $0x0000000058fd1740 +DATA ·IEEEConst+1048(SB)/8, $0x000000007c652fb8 + +// x^193600 mod p(x), x^193536 mod p(x) +DATA ·IEEEConst+1056(SB)/8, $0x00000000dadd9bfc +DATA ·IEEEConst+1064(SB)/8, $0x000000011c67842c + +// x^192576 mod p(x), x^192512 mod p(x) +DATA ·IEEEConst+1072(SB)/8, $0x00000001ea2140be +DATA ·IEEEConst+1080(SB)/8, $0x00000000254f759c + +// x^191552 mod p(x), x^191488 mod p(x) +DATA ·IEEEConst+1088(SB)/8, $0x000000009de128ba +DATA ·IEEEConst+1096(SB)/8, $0x000000007ece94ca + +// x^190528 mod p(x), x^190464 mod p(x) +DATA ·IEEEConst+1104(SB)/8, $0x000000013ac3aa8e +DATA ·IEEEConst+1112(SB)/8, $0x0000000038f258c2 + +// x^189504 mod p(x), x^189440 mod p(x) +DATA ·IEEEConst+1120(SB)/8, $0x0000000099980562 +DATA ·IEEEConst+1128(SB)/8, $0x00000001cdf17b00 + +// x^188480 mod p(x), x^188416 mod p(x) +DATA ·IEEEConst+1136(SB)/8, $0x00000001c1579c86 +DATA ·IEEEConst+1144(SB)/8, $0x000000011f882c16 + +// x^187456 mod p(x), x^187392 mod p(x) +DATA ·IEEEConst+1152(SB)/8, $0x0000000068dbbf94 +DATA ·IEEEConst+1160(SB)/8, $0x0000000100093fc8 + +// x^186432 mod p(x), x^186368 mod p(x) +DATA ·IEEEConst+1168(SB)/8, $0x000000004509fb04 +DATA ·IEEEConst+1176(SB)/8, $0x00000001cd684f16 + +// x^185408 mod p(x), x^185344 mod p(x) +DATA ·IEEEConst+1184(SB)/8, $0x00000001202f6398 +DATA ·IEEEConst+1192(SB)/8, $0x000000004bc6a70a + +// x^184384 mod p(x), x^184320 mod p(x) +DATA ·IEEEConst+1200(SB)/8, $0x000000013aea243e +DATA ·IEEEConst+1208(SB)/8, $0x000000004fc7e8e4 + +// x^183360 mod p(x), x^183296 mod p(x) +DATA ·IEEEConst+1216(SB)/8, $0x00000001b4052ae6 +DATA ·IEEEConst+1224(SB)/8, $0x0000000130103f1c + +// x^182336 mod p(x), x^182272 mod p(x) +DATA ·IEEEConst+1232(SB)/8, $0x00000001cd2a0ae8 +DATA ·IEEEConst+1240(SB)/8, $0x0000000111b0024c + +// x^181312 mod p(x), x^181248 mod p(x) +DATA ·IEEEConst+1248(SB)/8, $0x00000001fe4aa8b4 +DATA ·IEEEConst+1256(SB)/8, $0x000000010b3079da + +// x^180288 mod p(x), x^180224 mod p(x) +DATA ·IEEEConst+1264(SB)/8, 
$0x00000001d1559a42 +DATA ·IEEEConst+1272(SB)/8, $0x000000010192bcc2 + +// x^179264 mod p(x), x^179200 mod p(x) +DATA ·IEEEConst+1280(SB)/8, $0x00000001f3e05ecc +DATA ·IEEEConst+1288(SB)/8, $0x0000000074838d50 + +// x^178240 mod p(x), x^178176 mod p(x) +DATA ·IEEEConst+1296(SB)/8, $0x0000000104ddd2cc +DATA ·IEEEConst+1304(SB)/8, $0x000000001b20f520 + +// x^177216 mod p(x), x^177152 mod p(x) +DATA ·IEEEConst+1312(SB)/8, $0x000000015393153c +DATA ·IEEEConst+1320(SB)/8, $0x0000000050c3590a + +// x^176192 mod p(x), x^176128 mod p(x) +DATA ·IEEEConst+1328(SB)/8, $0x0000000057e942c6 +DATA ·IEEEConst+1336(SB)/8, $0x00000000b41cac8e + +// x^175168 mod p(x), x^175104 mod p(x) +DATA ·IEEEConst+1344(SB)/8, $0x000000012c633850 +DATA ·IEEEConst+1352(SB)/8, $0x000000000c72cc78 + +// x^174144 mod p(x), x^174080 mod p(x) +DATA ·IEEEConst+1360(SB)/8, $0x00000000ebcaae4c +DATA ·IEEEConst+1368(SB)/8, $0x0000000030cdb032 + +// x^173120 mod p(x), x^173056 mod p(x) +DATA ·IEEEConst+1376(SB)/8, $0x000000013ee532a6 +DATA ·IEEEConst+1384(SB)/8, $0x000000013e09fc32 + +// x^172096 mod p(x), x^172032 mod p(x) +DATA ·IEEEConst+1392(SB)/8, $0x00000001bf0cbc7e +DATA ·IEEEConst+1400(SB)/8, $0x000000001ed624d2 + +// x^171072 mod p(x), x^171008 mod p(x) +DATA ·IEEEConst+1408(SB)/8, $0x00000000d50b7a5a +DATA ·IEEEConst+1416(SB)/8, $0x00000000781aee1a + +// x^170048 mod p(x), x^169984 mod p(x) +DATA ·IEEEConst+1424(SB)/8, $0x0000000002fca6e8 +DATA ·IEEEConst+1432(SB)/8, $0x00000001c4d8348c + +// x^169024 mod p(x), x^168960 mod p(x) +DATA ·IEEEConst+1440(SB)/8, $0x000000007af40044 +DATA ·IEEEConst+1448(SB)/8, $0x0000000057a40336 + +// x^168000 mod p(x), x^167936 mod p(x) +DATA ·IEEEConst+1456(SB)/8, $0x0000000016178744 +DATA ·IEEEConst+1464(SB)/8, $0x0000000085544940 + +// x^166976 mod p(x), x^166912 mod p(x) +DATA ·IEEEConst+1472(SB)/8, $0x000000014c177458 +DATA ·IEEEConst+1480(SB)/8, $0x000000019cd21e80 + +// x^165952 mod p(x), x^165888 mod p(x) +DATA ·IEEEConst+1488(SB)/8, $0x000000011b6ddf04 +DATA 
·IEEEConst+1496(SB)/8, $0x000000013eb95bc0 + +// x^164928 mod p(x), x^164864 mod p(x) +DATA ·IEEEConst+1504(SB)/8, $0x00000001f3e29ccc +DATA ·IEEEConst+1512(SB)/8, $0x00000001dfc9fdfc + +// x^163904 mod p(x), x^163840 mod p(x) +DATA ·IEEEConst+1520(SB)/8, $0x0000000135ae7562 +DATA ·IEEEConst+1528(SB)/8, $0x00000000cd028bc2 + +// x^162880 mod p(x), x^162816 mod p(x) +DATA ·IEEEConst+1536(SB)/8, $0x0000000190ef812c +DATA ·IEEEConst+1544(SB)/8, $0x0000000090db8c44 + +// x^161856 mod p(x), x^161792 mod p(x) +DATA ·IEEEConst+1552(SB)/8, $0x0000000067a2c786 +DATA ·IEEEConst+1560(SB)/8, $0x000000010010a4ce + +// x^160832 mod p(x), x^160768 mod p(x) +DATA ·IEEEConst+1568(SB)/8, $0x0000000048b9496c +DATA ·IEEEConst+1576(SB)/8, $0x00000001c8f4c72c + +// x^159808 mod p(x), x^159744 mod p(x) +DATA ·IEEEConst+1584(SB)/8, $0x000000015a422de6 +DATA ·IEEEConst+1592(SB)/8, $0x000000001c26170c + +// x^158784 mod p(x), x^158720 mod p(x) +DATA ·IEEEConst+1600(SB)/8, $0x00000001ef0e3640 +DATA ·IEEEConst+1608(SB)/8, $0x00000000e3fccf68 + +// x^157760 mod p(x), x^157696 mod p(x) +DATA ·IEEEConst+1616(SB)/8, $0x00000001006d2d26 +DATA ·IEEEConst+1624(SB)/8, $0x00000000d513ed24 + +// x^156736 mod p(x), x^156672 mod p(x) +DATA ·IEEEConst+1632(SB)/8, $0x00000001170d56d6 +DATA ·IEEEConst+1640(SB)/8, $0x00000000141beada + +// x^155712 mod p(x), x^155648 mod p(x) +DATA ·IEEEConst+1648(SB)/8, $0x00000000a5fb613c +DATA ·IEEEConst+1656(SB)/8, $0x000000011071aea0 + +// x^154688 mod p(x), x^154624 mod p(x) +DATA ·IEEEConst+1664(SB)/8, $0x0000000040bbf7fc +DATA ·IEEEConst+1672(SB)/8, $0x000000012e19080a + +// x^153664 mod p(x), x^153600 mod p(x) +DATA ·IEEEConst+1680(SB)/8, $0x000000016ac3a5b2 +DATA ·IEEEConst+1688(SB)/8, $0x0000000100ecf826 + +// x^152640 mod p(x), x^152576 mod p(x) +DATA ·IEEEConst+1696(SB)/8, $0x00000000abf16230 +DATA ·IEEEConst+1704(SB)/8, $0x0000000069b09412 + +// x^151616 mod p(x), x^151552 mod p(x) +DATA ·IEEEConst+1712(SB)/8, $0x00000001ebe23fac +DATA ·IEEEConst+1720(SB)/8, 
$0x0000000122297bac + +// x^150592 mod p(x), x^150528 mod p(x) +DATA ·IEEEConst+1728(SB)/8, $0x000000008b6a0894 +DATA ·IEEEConst+1736(SB)/8, $0x00000000e9e4b068 + +// x^149568 mod p(x), x^149504 mod p(x) +DATA ·IEEEConst+1744(SB)/8, $0x00000001288ea478 +DATA ·IEEEConst+1752(SB)/8, $0x000000004b38651a + +// x^148544 mod p(x), x^148480 mod p(x) +DATA ·IEEEConst+1760(SB)/8, $0x000000016619c442 +DATA ·IEEEConst+1768(SB)/8, $0x00000001468360e2 + +// x^147520 mod p(x), x^147456 mod p(x) +DATA ·IEEEConst+1776(SB)/8, $0x0000000086230038 +DATA ·IEEEConst+1784(SB)/8, $0x00000000121c2408 + +// x^146496 mod p(x), x^146432 mod p(x) +DATA ·IEEEConst+1792(SB)/8, $0x000000017746a756 +DATA ·IEEEConst+1800(SB)/8, $0x00000000da7e7d08 + +// x^145472 mod p(x), x^145408 mod p(x) +DATA ·IEEEConst+1808(SB)/8, $0x0000000191b8f8f8 +DATA ·IEEEConst+1816(SB)/8, $0x00000001058d7652 + +// x^144448 mod p(x), x^144384 mod p(x) +DATA ·IEEEConst+1824(SB)/8, $0x000000008e167708 +DATA ·IEEEConst+1832(SB)/8, $0x000000014a098a90 + +// x^143424 mod p(x), x^143360 mod p(x) +DATA ·IEEEConst+1840(SB)/8, $0x0000000148b22d54 +DATA ·IEEEConst+1848(SB)/8, $0x0000000020dbe72e + +// x^142400 mod p(x), x^142336 mod p(x) +DATA ·IEEEConst+1856(SB)/8, $0x0000000044ba2c3c +DATA ·IEEEConst+1864(SB)/8, $0x000000011e7323e8 + +// x^141376 mod p(x), x^141312 mod p(x) +DATA ·IEEEConst+1872(SB)/8, $0x00000000b54d2b52 +DATA ·IEEEConst+1880(SB)/8, $0x00000000d5d4bf94 + +// x^140352 mod p(x), x^140288 mod p(x) +DATA ·IEEEConst+1888(SB)/8, $0x0000000005a4fd8a +DATA ·IEEEConst+1896(SB)/8, $0x0000000199d8746c + +// x^139328 mod p(x), x^139264 mod p(x) +DATA ·IEEEConst+1904(SB)/8, $0x0000000139f9fc46 +DATA ·IEEEConst+1912(SB)/8, $0x00000000ce9ca8a0 + +// x^138304 mod p(x), x^138240 mod p(x) +DATA ·IEEEConst+1920(SB)/8, $0x000000015a1fa824 +DATA ·IEEEConst+1928(SB)/8, $0x00000000136edece + +// x^137280 mod p(x), x^137216 mod p(x) +DATA ·IEEEConst+1936(SB)/8, $0x000000000a61ae4c +DATA ·IEEEConst+1944(SB)/8, $0x000000019b92a068 + +// 
x^136256 mod p(x), x^136192 mod p(x) +DATA ·IEEEConst+1952(SB)/8, $0x0000000145e9113e +DATA ·IEEEConst+1960(SB)/8, $0x0000000071d62206 + +// x^135232 mod p(x), x^135168 mod p(x) +DATA ·IEEEConst+1968(SB)/8, $0x000000006a348448 +DATA ·IEEEConst+1976(SB)/8, $0x00000000dfc50158 + +// x^134208 mod p(x), x^134144 mod p(x) +DATA ·IEEEConst+1984(SB)/8, $0x000000004d80a08c +DATA ·IEEEConst+1992(SB)/8, $0x00000001517626bc + +// x^133184 mod p(x), x^133120 mod p(x) +DATA ·IEEEConst+2000(SB)/8, $0x000000014b6837a0 +DATA ·IEEEConst+2008(SB)/8, $0x0000000148d1e4fa + +// x^132160 mod p(x), x^132096 mod p(x) +DATA ·IEEEConst+2016(SB)/8, $0x000000016896a7fc +DATA ·IEEEConst+2024(SB)/8, $0x0000000094d8266e + +// x^131136 mod p(x), x^131072 mod p(x) +DATA ·IEEEConst+2032(SB)/8, $0x000000014f187140 +DATA ·IEEEConst+2040(SB)/8, $0x00000000606c5e34 + +// x^130112 mod p(x), x^130048 mod p(x) +DATA ·IEEEConst+2048(SB)/8, $0x000000019581b9da +DATA ·IEEEConst+2056(SB)/8, $0x000000019766beaa + +// x^129088 mod p(x), x^129024 mod p(x) +DATA ·IEEEConst+2064(SB)/8, $0x00000001091bc984 +DATA ·IEEEConst+2072(SB)/8, $0x00000001d80c506c + +// x^128064 mod p(x), x^128000 mod p(x) +DATA ·IEEEConst+2080(SB)/8, $0x000000001067223c +DATA ·IEEEConst+2088(SB)/8, $0x000000001e73837c + +// x^127040 mod p(x), x^126976 mod p(x) +DATA ·IEEEConst+2096(SB)/8, $0x00000001ab16ea02 +DATA ·IEEEConst+2104(SB)/8, $0x0000000064d587de + +// x^126016 mod p(x), x^125952 mod p(x) +DATA ·IEEEConst+2112(SB)/8, $0x000000013c4598a8 +DATA ·IEEEConst+2120(SB)/8, $0x00000000f4a507b0 + +// x^124992 mod p(x), x^124928 mod p(x) +DATA ·IEEEConst+2128(SB)/8, $0x00000000b3735430 +DATA ·IEEEConst+2136(SB)/8, $0x0000000040e342fc + +// x^123968 mod p(x), x^123904 mod p(x) +DATA ·IEEEConst+2144(SB)/8, $0x00000001bb3fc0c0 +DATA ·IEEEConst+2152(SB)/8, $0x00000001d5ad9c3a + +// x^122944 mod p(x), x^122880 mod p(x) +DATA ·IEEEConst+2160(SB)/8, $0x00000001570ae19c +DATA ·IEEEConst+2168(SB)/8, $0x0000000094a691a4 + +// x^121920 mod p(x), 
x^121856 mod p(x) +DATA ·IEEEConst+2176(SB)/8, $0x00000001ea910712 +DATA ·IEEEConst+2184(SB)/8, $0x00000001271ecdfa + +// x^120896 mod p(x), x^120832 mod p(x) +DATA ·IEEEConst+2192(SB)/8, $0x0000000167127128 +DATA ·IEEEConst+2200(SB)/8, $0x000000009e54475a + +// x^119872 mod p(x), x^119808 mod p(x) +DATA ·IEEEConst+2208(SB)/8, $0x0000000019e790a2 +DATA ·IEEEConst+2216(SB)/8, $0x00000000c9c099ee + +// x^118848 mod p(x), x^118784 mod p(x) +DATA ·IEEEConst+2224(SB)/8, $0x000000003788f710 +DATA ·IEEEConst+2232(SB)/8, $0x000000009a2f736c + +// x^117824 mod p(x), x^117760 mod p(x) +DATA ·IEEEConst+2240(SB)/8, $0x00000001682a160e +DATA ·IEEEConst+2248(SB)/8, $0x00000000bb9f4996 + +// x^116800 mod p(x), x^116736 mod p(x) +DATA ·IEEEConst+2256(SB)/8, $0x000000007f0ebd2e +DATA ·IEEEConst+2264(SB)/8, $0x00000001db688050 + +// x^115776 mod p(x), x^115712 mod p(x) +DATA ·IEEEConst+2272(SB)/8, $0x000000002b032080 +DATA ·IEEEConst+2280(SB)/8, $0x00000000e9b10af4 + +// x^114752 mod p(x), x^114688 mod p(x) +DATA ·IEEEConst+2288(SB)/8, $0x00000000cfd1664a +DATA ·IEEEConst+2296(SB)/8, $0x000000012d4545e4 + +// x^113728 mod p(x), x^113664 mod p(x) +DATA ·IEEEConst+2304(SB)/8, $0x00000000aa1181c2 +DATA ·IEEEConst+2312(SB)/8, $0x000000000361139c + +// x^112704 mod p(x), x^112640 mod p(x) +DATA ·IEEEConst+2320(SB)/8, $0x00000000ddd08002 +DATA ·IEEEConst+2328(SB)/8, $0x00000001a5a1a3a8 + +// x^111680 mod p(x), x^111616 mod p(x) +DATA ·IEEEConst+2336(SB)/8, $0x00000000e8dd0446 +DATA ·IEEEConst+2344(SB)/8, $0x000000006844e0b0 + +// x^110656 mod p(x), x^110592 mod p(x) +DATA ·IEEEConst+2352(SB)/8, $0x00000001bbd94a00 +DATA ·IEEEConst+2360(SB)/8, $0x00000000c3762f28 + +// x^109632 mod p(x), x^109568 mod p(x) +DATA ·IEEEConst+2368(SB)/8, $0x00000000ab6cd180 +DATA ·IEEEConst+2376(SB)/8, $0x00000001d26287a2 + +// x^108608 mod p(x), x^108544 mod p(x) +DATA ·IEEEConst+2384(SB)/8, $0x0000000031803ce2 +DATA ·IEEEConst+2392(SB)/8, $0x00000001f6f0bba8 + +// x^107584 mod p(x), x^107520 mod p(x) +DATA 
·IEEEConst+2400(SB)/8, $0x0000000024f40b0c +DATA ·IEEEConst+2408(SB)/8, $0x000000002ffabd62 + +// x^106560 mod p(x), x^106496 mod p(x) +DATA ·IEEEConst+2416(SB)/8, $0x00000001ba1d9834 +DATA ·IEEEConst+2424(SB)/8, $0x00000000fb4516b8 + +// x^105536 mod p(x), x^105472 mod p(x) +DATA ·IEEEConst+2432(SB)/8, $0x0000000104de61aa +DATA ·IEEEConst+2440(SB)/8, $0x000000018cfa961c + +// x^104512 mod p(x), x^104448 mod p(x) +DATA ·IEEEConst+2448(SB)/8, $0x0000000113e40d46 +DATA ·IEEEConst+2456(SB)/8, $0x000000019e588d52 + +// x^103488 mod p(x), x^103424 mod p(x) +DATA ·IEEEConst+2464(SB)/8, $0x00000001415598a0 +DATA ·IEEEConst+2472(SB)/8, $0x00000001180f0bbc + +// x^102464 mod p(x), x^102400 mod p(x) +DATA ·IEEEConst+2480(SB)/8, $0x00000000bf6c8c90 +DATA ·IEEEConst+2488(SB)/8, $0x00000000e1d9177a + +// x^101440 mod p(x), x^101376 mod p(x) +DATA ·IEEEConst+2496(SB)/8, $0x00000001788b0504 +DATA ·IEEEConst+2504(SB)/8, $0x0000000105abc27c + +// x^100416 mod p(x), x^100352 mod p(x) +DATA ·IEEEConst+2512(SB)/8, $0x0000000038385d02 +DATA ·IEEEConst+2520(SB)/8, $0x00000000972e4a58 + +// x^99392 mod p(x), x^99328 mod p(x) +DATA ·IEEEConst+2528(SB)/8, $0x00000001b6c83844 +DATA ·IEEEConst+2536(SB)/8, $0x0000000183499a5e + +// x^98368 mod p(x), x^98304 mod p(x) +DATA ·IEEEConst+2544(SB)/8, $0x0000000051061a8a +DATA ·IEEEConst+2552(SB)/8, $0x00000001c96a8cca + +// x^97344 mod p(x), x^97280 mod p(x) +DATA ·IEEEConst+2560(SB)/8, $0x000000017351388a +DATA ·IEEEConst+2568(SB)/8, $0x00000001a1a5b60c + +// x^96320 mod p(x), x^96256 mod p(x) +DATA ·IEEEConst+2576(SB)/8, $0x0000000132928f92 +DATA ·IEEEConst+2584(SB)/8, $0x00000000e4b6ac9c + +// x^95296 mod p(x), x^95232 mod p(x) +DATA ·IEEEConst+2592(SB)/8, $0x00000000e6b4f48a +DATA ·IEEEConst+2600(SB)/8, $0x00000001807e7f5a + +// x^94272 mod p(x), x^94208 mod p(x) +DATA ·IEEEConst+2608(SB)/8, $0x0000000039d15e90 +DATA ·IEEEConst+2616(SB)/8, $0x000000017a7e3bc8 + +// x^93248 mod p(x), x^93184 mod p(x) +DATA ·IEEEConst+2624(SB)/8, 
$0x00000000312d6074 +DATA ·IEEEConst+2632(SB)/8, $0x00000000d73975da + +// x^92224 mod p(x), x^92160 mod p(x) +DATA ·IEEEConst+2640(SB)/8, $0x000000017bbb2cc4 +DATA ·IEEEConst+2648(SB)/8, $0x000000017375d038 + +// x^91200 mod p(x), x^91136 mod p(x) +DATA ·IEEEConst+2656(SB)/8, $0x000000016ded3e18 +DATA ·IEEEConst+2664(SB)/8, $0x00000000193680bc + +// x^90176 mod p(x), x^90112 mod p(x) +DATA ·IEEEConst+2672(SB)/8, $0x00000000f1638b16 +DATA ·IEEEConst+2680(SB)/8, $0x00000000999b06f6 + +// x^89152 mod p(x), x^89088 mod p(x) +DATA ·IEEEConst+2688(SB)/8, $0x00000001d38b9ecc +DATA ·IEEEConst+2696(SB)/8, $0x00000001f685d2b8 + +// x^88128 mod p(x), x^88064 mod p(x) +DATA ·IEEEConst+2704(SB)/8, $0x000000018b8d09dc +DATA ·IEEEConst+2712(SB)/8, $0x00000001f4ecbed2 + +// x^87104 mod p(x), x^87040 mod p(x) +DATA ·IEEEConst+2720(SB)/8, $0x00000000e7bc27d2 +DATA ·IEEEConst+2728(SB)/8, $0x00000000ba16f1a0 + +// x^86080 mod p(x), x^86016 mod p(x) +DATA ·IEEEConst+2736(SB)/8, $0x00000000275e1e96 +DATA ·IEEEConst+2744(SB)/8, $0x0000000115aceac4 + +// x^85056 mod p(x), x^84992 mod p(x) +DATA ·IEEEConst+2752(SB)/8, $0x00000000e2e3031e +DATA ·IEEEConst+2760(SB)/8, $0x00000001aeff6292 + +// x^84032 mod p(x), x^83968 mod p(x) +DATA ·IEEEConst+2768(SB)/8, $0x00000001041c84d8 +DATA ·IEEEConst+2776(SB)/8, $0x000000009640124c + +// x^83008 mod p(x), x^82944 mod p(x) +DATA ·IEEEConst+2784(SB)/8, $0x00000000706ce672 +DATA ·IEEEConst+2792(SB)/8, $0x0000000114f41f02 + +// x^81984 mod p(x), x^81920 mod p(x) +DATA ·IEEEConst+2800(SB)/8, $0x000000015d5070da +DATA ·IEEEConst+2808(SB)/8, $0x000000009c5f3586 + +// x^80960 mod p(x), x^80896 mod p(x) +DATA ·IEEEConst+2816(SB)/8, $0x0000000038f9493a +DATA ·IEEEConst+2824(SB)/8, $0x00000001878275fa + +// x^79936 mod p(x), x^79872 mod p(x) +DATA ·IEEEConst+2832(SB)/8, $0x00000000a3348a76 +DATA ·IEEEConst+2840(SB)/8, $0x00000000ddc42ce8 + +// x^78912 mod p(x), x^78848 mod p(x) +DATA ·IEEEConst+2848(SB)/8, $0x00000001ad0aab92 +DATA ·IEEEConst+2856(SB)/8, 
$0x0000000181d2c73a + +// x^77888 mod p(x), x^77824 mod p(x) +DATA ·IEEEConst+2864(SB)/8, $0x000000019e85f712 +DATA ·IEEEConst+2872(SB)/8, $0x0000000141c9320a + +// x^76864 mod p(x), x^76800 mod p(x) +DATA ·IEEEConst+2880(SB)/8, $0x000000005a871e76 +DATA ·IEEEConst+2888(SB)/8, $0x000000015235719a + +// x^75840 mod p(x), x^75776 mod p(x) +DATA ·IEEEConst+2896(SB)/8, $0x000000017249c662 +DATA ·IEEEConst+2904(SB)/8, $0x00000000be27d804 + +// x^74816 mod p(x), x^74752 mod p(x) +DATA ·IEEEConst+2912(SB)/8, $0x000000003a084712 +DATA ·IEEEConst+2920(SB)/8, $0x000000006242d45a + +// x^73792 mod p(x), x^73728 mod p(x) +DATA ·IEEEConst+2928(SB)/8, $0x00000000ed438478 +DATA ·IEEEConst+2936(SB)/8, $0x000000009a53638e + +// x^72768 mod p(x), x^72704 mod p(x) +DATA ·IEEEConst+2944(SB)/8, $0x00000000abac34cc +DATA ·IEEEConst+2952(SB)/8, $0x00000001001ecfb6 + +// x^71744 mod p(x), x^71680 mod p(x) +DATA ·IEEEConst+2960(SB)/8, $0x000000005f35ef3e +DATA ·IEEEConst+2968(SB)/8, $0x000000016d7c2d64 + +// x^70720 mod p(x), x^70656 mod p(x) +DATA ·IEEEConst+2976(SB)/8, $0x0000000047d6608c +DATA ·IEEEConst+2984(SB)/8, $0x00000001d0ce46c0 + +// x^69696 mod p(x), x^69632 mod p(x) +DATA ·IEEEConst+2992(SB)/8, $0x000000002d01470e +DATA ·IEEEConst+3000(SB)/8, $0x0000000124c907b4 + +// x^68672 mod p(x), x^68608 mod p(x) +DATA ·IEEEConst+3008(SB)/8, $0x0000000158bbc7b0 +DATA ·IEEEConst+3016(SB)/8, $0x0000000018a555ca + +// x^67648 mod p(x), x^67584 mod p(x) +DATA ·IEEEConst+3024(SB)/8, $0x00000000c0a23e8e +DATA ·IEEEConst+3032(SB)/8, $0x000000006b0980bc + +// x^66624 mod p(x), x^66560 mod p(x) +DATA ·IEEEConst+3040(SB)/8, $0x00000001ebd85c88 +DATA ·IEEEConst+3048(SB)/8, $0x000000008bbba964 + +// x^65600 mod p(x), x^65536 mod p(x) +DATA ·IEEEConst+3056(SB)/8, $0x000000019ee20bb2 +DATA ·IEEEConst+3064(SB)/8, $0x00000001070a5a1e + +// x^64576 mod p(x), x^64512 mod p(x) +DATA ·IEEEConst+3072(SB)/8, $0x00000001acabf2d6 +DATA ·IEEEConst+3080(SB)/8, $0x000000002204322a + +// x^63552 mod p(x), x^63488 
mod p(x) +DATA ·IEEEConst+3088(SB)/8, $0x00000001b7963d56 +DATA ·IEEEConst+3096(SB)/8, $0x00000000a27524d0 + +// x^62528 mod p(x), x^62464 mod p(x) +DATA ·IEEEConst+3104(SB)/8, $0x000000017bffa1fe +DATA ·IEEEConst+3112(SB)/8, $0x0000000020b1e4ba + +// x^61504 mod p(x), x^61440 mod p(x) +DATA ·IEEEConst+3120(SB)/8, $0x000000001f15333e +DATA ·IEEEConst+3128(SB)/8, $0x0000000032cc27fc + +// x^60480 mod p(x), x^60416 mod p(x) +DATA ·IEEEConst+3136(SB)/8, $0x000000018593129e +DATA ·IEEEConst+3144(SB)/8, $0x0000000044dd22b8 + +// x^59456 mod p(x), x^59392 mod p(x) +DATA ·IEEEConst+3152(SB)/8, $0x000000019cb32602 +DATA ·IEEEConst+3160(SB)/8, $0x00000000dffc9e0a + +// x^58432 mod p(x), x^58368 mod p(x) +DATA ·IEEEConst+3168(SB)/8, $0x0000000142b05cc8 +DATA ·IEEEConst+3176(SB)/8, $0x00000001b7a0ed14 + +// x^57408 mod p(x), x^57344 mod p(x) +DATA ·IEEEConst+3184(SB)/8, $0x00000001be49e7a4 +DATA ·IEEEConst+3192(SB)/8, $0x00000000c7842488 + +// x^56384 mod p(x), x^56320 mod p(x) +DATA ·IEEEConst+3200(SB)/8, $0x0000000108f69d6c +DATA ·IEEEConst+3208(SB)/8, $0x00000001c02a4fee + +// x^55360 mod p(x), x^55296 mod p(x) +DATA ·IEEEConst+3216(SB)/8, $0x000000006c0971f0 +DATA ·IEEEConst+3224(SB)/8, $0x000000003c273778 + +// x^54336 mod p(x), x^54272 mod p(x) +DATA ·IEEEConst+3232(SB)/8, $0x000000005b16467a +DATA ·IEEEConst+3240(SB)/8, $0x00000001d63f8894 + +// x^53312 mod p(x), x^53248 mod p(x) +DATA ·IEEEConst+3248(SB)/8, $0x00000001551a628e +DATA ·IEEEConst+3256(SB)/8, $0x000000006be557d6 + +// x^52288 mod p(x), x^52224 mod p(x) +DATA ·IEEEConst+3264(SB)/8, $0x000000019e42ea92 +DATA ·IEEEConst+3272(SB)/8, $0x000000006a7806ea + +// x^51264 mod p(x), x^51200 mod p(x) +DATA ·IEEEConst+3280(SB)/8, $0x000000012fa83ff2 +DATA ·IEEEConst+3288(SB)/8, $0x000000016155aa0c + +// x^50240 mod p(x), x^50176 mod p(x) +DATA ·IEEEConst+3296(SB)/8, $0x000000011ca9cde0 +DATA ·IEEEConst+3304(SB)/8, $0x00000000908650ac + +// x^49216 mod p(x), x^49152 mod p(x) +DATA ·IEEEConst+3312(SB)/8, 
$0x00000000c8e5cd74 +DATA ·IEEEConst+3320(SB)/8, $0x00000000aa5a8084 + +// x^48192 mod p(x), x^48128 mod p(x) +DATA ·IEEEConst+3328(SB)/8, $0x0000000096c27f0c +DATA ·IEEEConst+3336(SB)/8, $0x0000000191bb500a + +// x^47168 mod p(x), x^47104 mod p(x) +DATA ·IEEEConst+3344(SB)/8, $0x000000002baed926 +DATA ·IEEEConst+3352(SB)/8, $0x0000000064e9bed0 + +// x^46144 mod p(x), x^46080 mod p(x) +DATA ·IEEEConst+3360(SB)/8, $0x000000017c8de8d2 +DATA ·IEEEConst+3368(SB)/8, $0x000000009444f302 + +// x^45120 mod p(x), x^45056 mod p(x) +DATA ·IEEEConst+3376(SB)/8, $0x00000000d43d6068 +DATA ·IEEEConst+3384(SB)/8, $0x000000019db07d3c + +// x^44096 mod p(x), x^44032 mod p(x) +DATA ·IEEEConst+3392(SB)/8, $0x00000000cb2c4b26 +DATA ·IEEEConst+3400(SB)/8, $0x00000001359e3e6e + +// x^43072 mod p(x), x^43008 mod p(x) +DATA ·IEEEConst+3408(SB)/8, $0x0000000145b8da26 +DATA ·IEEEConst+3416(SB)/8, $0x00000001e4f10dd2 + +// x^42048 mod p(x), x^41984 mod p(x) +DATA ·IEEEConst+3424(SB)/8, $0x000000018fff4b08 +DATA ·IEEEConst+3432(SB)/8, $0x0000000124f5735e + +// x^41024 mod p(x), x^40960 mod p(x) +DATA ·IEEEConst+3440(SB)/8, $0x0000000150b58ed0 +DATA ·IEEEConst+3448(SB)/8, $0x0000000124760a4c + +// x^40000 mod p(x), x^39936 mod p(x) +DATA ·IEEEConst+3456(SB)/8, $0x00000001549f39bc +DATA ·IEEEConst+3464(SB)/8, $0x000000000f1fc186 + +// x^38976 mod p(x), x^38912 mod p(x) +DATA ·IEEEConst+3472(SB)/8, $0x00000000ef4d2f42 +DATA ·IEEEConst+3480(SB)/8, $0x00000000150e4cc4 + +// x^37952 mod p(x), x^37888 mod p(x) +DATA ·IEEEConst+3488(SB)/8, $0x00000001b1468572 +DATA ·IEEEConst+3496(SB)/8, $0x000000002a6204e8 + +// x^36928 mod p(x), x^36864 mod p(x) +DATA ·IEEEConst+3504(SB)/8, $0x000000013d7403b2 +DATA ·IEEEConst+3512(SB)/8, $0x00000000beb1d432 + +// x^35904 mod p(x), x^35840 mod p(x) +DATA ·IEEEConst+3520(SB)/8, $0x00000001a4681842 +DATA ·IEEEConst+3528(SB)/8, $0x0000000135f3f1f0 + +// x^34880 mod p(x), x^34816 mod p(x) +DATA ·IEEEConst+3536(SB)/8, $0x0000000167714492 +DATA ·IEEEConst+3544(SB)/8, 
$0x0000000074fe2232 + +// x^33856 mod p(x), x^33792 mod p(x) +DATA ·IEEEConst+3552(SB)/8, $0x00000001e599099a +DATA ·IEEEConst+3560(SB)/8, $0x000000001ac6e2ba + +// x^32832 mod p(x), x^32768 mod p(x) +DATA ·IEEEConst+3568(SB)/8, $0x00000000fe128194 +DATA ·IEEEConst+3576(SB)/8, $0x0000000013fca91e + +// x^31808 mod p(x), x^31744 mod p(x) +DATA ·IEEEConst+3584(SB)/8, $0x0000000077e8b990 +DATA ·IEEEConst+3592(SB)/8, $0x0000000183f4931e + +// x^30784 mod p(x), x^30720 mod p(x) +DATA ·IEEEConst+3600(SB)/8, $0x00000001a267f63a +DATA ·IEEEConst+3608(SB)/8, $0x00000000b6d9b4e4 + +// x^29760 mod p(x), x^29696 mod p(x) +DATA ·IEEEConst+3616(SB)/8, $0x00000001945c245a +DATA ·IEEEConst+3624(SB)/8, $0x00000000b5188656 + +// x^28736 mod p(x), x^28672 mod p(x) +DATA ·IEEEConst+3632(SB)/8, $0x0000000149002e76 +DATA ·IEEEConst+3640(SB)/8, $0x0000000027a81a84 + +// x^27712 mod p(x), x^27648 mod p(x) +DATA ·IEEEConst+3648(SB)/8, $0x00000001bb8310a4 +DATA ·IEEEConst+3656(SB)/8, $0x0000000125699258 + +// x^26688 mod p(x), x^26624 mod p(x) +DATA ·IEEEConst+3664(SB)/8, $0x000000019ec60bcc +DATA ·IEEEConst+3672(SB)/8, $0x00000001b23de796 + +// x^25664 mod p(x), x^25600 mod p(x) +DATA ·IEEEConst+3680(SB)/8, $0x000000012d8590ae +DATA ·IEEEConst+3688(SB)/8, $0x00000000fe4365dc + +// x^24640 mod p(x), x^24576 mod p(x) +DATA ·IEEEConst+3696(SB)/8, $0x0000000065b00684 +DATA ·IEEEConst+3704(SB)/8, $0x00000000c68f497a + +// x^23616 mod p(x), x^23552 mod p(x) +DATA ·IEEEConst+3712(SB)/8, $0x000000015e5aeadc +DATA ·IEEEConst+3720(SB)/8, $0x00000000fbf521ee + +// x^22592 mod p(x), x^22528 mod p(x) +DATA ·IEEEConst+3728(SB)/8, $0x00000000b77ff2b0 +DATA ·IEEEConst+3736(SB)/8, $0x000000015eac3378 + +// x^21568 mod p(x), x^21504 mod p(x) +DATA ·IEEEConst+3744(SB)/8, $0x0000000188da2ff6 +DATA ·IEEEConst+3752(SB)/8, $0x0000000134914b90 + +// x^20544 mod p(x), x^20480 mod p(x) +DATA ·IEEEConst+3760(SB)/8, $0x0000000063da929a +DATA ·IEEEConst+3768(SB)/8, $0x0000000016335cfe + +// x^19520 mod p(x), x^19456 
mod p(x) +DATA ·IEEEConst+3776(SB)/8, $0x00000001389caa80 +DATA ·IEEEConst+3784(SB)/8, $0x000000010372d10c + +// x^18496 mod p(x), x^18432 mod p(x) +DATA ·IEEEConst+3792(SB)/8, $0x000000013db599d2 +DATA ·IEEEConst+3800(SB)/8, $0x000000015097b908 + +// x^17472 mod p(x), x^17408 mod p(x) +DATA ·IEEEConst+3808(SB)/8, $0x0000000122505a86 +DATA ·IEEEConst+3816(SB)/8, $0x00000001227a7572 + +// x^16448 mod p(x), x^16384 mod p(x) +DATA ·IEEEConst+3824(SB)/8, $0x000000016bd72746 +DATA ·IEEEConst+3832(SB)/8, $0x000000009a8f75c0 + +// x^15424 mod p(x), x^15360 mod p(x) +DATA ·IEEEConst+3840(SB)/8, $0x00000001c3faf1d4 +DATA ·IEEEConst+3848(SB)/8, $0x00000000682c77a2 + +// x^14400 mod p(x), x^14336 mod p(x) +DATA ·IEEEConst+3856(SB)/8, $0x00000001111c826c +DATA ·IEEEConst+3864(SB)/8, $0x00000000231f091c + +// x^13376 mod p(x), x^13312 mod p(x) +DATA ·IEEEConst+3872(SB)/8, $0x00000000153e9fb2 +DATA ·IEEEConst+3880(SB)/8, $0x000000007d4439f2 + +// x^12352 mod p(x), x^12288 mod p(x) +DATA ·IEEEConst+3888(SB)/8, $0x000000002b1f7b60 +DATA ·IEEEConst+3896(SB)/8, $0x000000017e221efc + +// x^11328 mod p(x), x^11264 mod p(x) +DATA ·IEEEConst+3904(SB)/8, $0x00000000b1dba570 +DATA ·IEEEConst+3912(SB)/8, $0x0000000167457c38 + +// x^10304 mod p(x), x^10240 mod p(x) +DATA ·IEEEConst+3920(SB)/8, $0x00000001f6397b76 +DATA ·IEEEConst+3928(SB)/8, $0x00000000bdf081c4 + +// x^9280 mod p(x), x^9216 mod p(x) +DATA ·IEEEConst+3936(SB)/8, $0x0000000156335214 +DATA ·IEEEConst+3944(SB)/8, $0x000000016286d6b0 + +// x^8256 mod p(x), x^8192 mod p(x) +DATA ·IEEEConst+3952(SB)/8, $0x00000001d70e3986 +DATA ·IEEEConst+3960(SB)/8, $0x00000000c84f001c + +// x^7232 mod p(x), x^7168 mod p(x) +DATA ·IEEEConst+3968(SB)/8, $0x000000003701a774 +DATA ·IEEEConst+3976(SB)/8, $0x0000000064efe7c0 + +// x^6208 mod p(x), x^6144 mod p(x) +DATA ·IEEEConst+3984(SB)/8, $0x00000000ac81ef72 +DATA ·IEEEConst+3992(SB)/8, $0x000000000ac2d904 + +// x^5184 mod p(x), x^5120 mod p(x) +DATA ·IEEEConst+4000(SB)/8, $0x0000000133212464 +DATA 
·IEEEConst+4008(SB)/8, $0x00000000fd226d14 + +// x^4160 mod p(x), x^4096 mod p(x) +DATA ·IEEEConst+4016(SB)/8, $0x00000000e4e45610 +DATA ·IEEEConst+4024(SB)/8, $0x000000011cfd42e0 + +// x^3136 mod p(x), x^3072 mod p(x) +DATA ·IEEEConst+4032(SB)/8, $0x000000000c1bd370 +DATA ·IEEEConst+4040(SB)/8, $0x000000016e5a5678 + +// x^2112 mod p(x), x^2048 mod p(x) +DATA ·IEEEConst+4048(SB)/8, $0x00000001a7b9e7a6 +DATA ·IEEEConst+4056(SB)/8, $0x00000001d888fe22 + +// x^1088 mod p(x), x^1024 mod p(x) +DATA ·IEEEConst+4064(SB)/8, $0x000000007d657a10 +DATA ·IEEEConst+4072(SB)/8, $0x00000001af77fcd4 + +// x^2048 mod p(x), x^2016 mod p(x), x^1984 mod p(x), x^1952 mod p(x) +DATA ·IEEEConst+4080(SB)/8, $0x99168a18ec447f11 +DATA ·IEEEConst+4088(SB)/8, $0xed837b2613e8221e + +// x^1920 mod p(x), x^1888 mod p(x), x^1856 mod p(x), x^1824 mod p(x) +DATA ·IEEEConst+4096(SB)/8, $0xe23e954e8fd2cd3c +DATA ·IEEEConst+4104(SB)/8, $0xc8acdd8147b9ce5a + +// x^1792 mod p(x), x^1760 mod p(x), x^1728 mod p(x), x^1696 mod p(x) +DATA ·IEEEConst+4112(SB)/8, $0x92f8befe6b1d2b53 +DATA ·IEEEConst+4120(SB)/8, $0xd9ad6d87d4277e25 + +// x^1664 mod p(x), x^1632 mod p(x), x^1600 mod p(x), x^1568 mod p(x) +DATA ·IEEEConst+4128(SB)/8, $0xf38a3556291ea462 +DATA ·IEEEConst+4136(SB)/8, $0xc10ec5e033fbca3b + +// x^1536 mod p(x), x^1504 mod p(x), x^1472 mod p(x), x^1440 mod p(x) +DATA ·IEEEConst+4144(SB)/8, $0x974ac56262b6ca4b +DATA ·IEEEConst+4152(SB)/8, $0xc0b55b0e82e02e2f + +// x^1408 mod p(x), x^1376 mod p(x), x^1344 mod p(x), x^1312 mod p(x) +DATA ·IEEEConst+4160(SB)/8, $0x855712b3784d2a56 +DATA ·IEEEConst+4168(SB)/8, $0x71aa1df0e172334d + +// x^1280 mod p(x), x^1248 mod p(x), x^1216 mod p(x), x^1184 mod p(x) +DATA ·IEEEConst+4176(SB)/8, $0xa5abe9f80eaee722 +DATA ·IEEEConst+4184(SB)/8, $0xfee3053e3969324d + +// x^1152 mod p(x), x^1120 mod p(x), x^1088 mod p(x), x^1056 mod p(x) +DATA ·IEEEConst+4192(SB)/8, $0x1fa0943ddb54814c +DATA ·IEEEConst+4200(SB)/8, $0xf44779b93eb2bd08 + +// x^1024 mod p(x), x^992 mod p(x), 
x^960 mod p(x), x^928 mod p(x) +DATA ·IEEEConst+4208(SB)/8, $0xa53ff440d7bbfe6a +DATA ·IEEEConst+4216(SB)/8, $0xf5449b3f00cc3374 + +// x^896 mod p(x), x^864 mod p(x), x^832 mod p(x), x^800 mod p(x) +DATA ·IEEEConst+4224(SB)/8, $0xebe7e3566325605c +DATA ·IEEEConst+4232(SB)/8, $0x6f8346e1d777606e + +// x^768 mod p(x), x^736 mod p(x), x^704 mod p(x), x^672 mod p(x) +DATA ·IEEEConst+4240(SB)/8, $0xc65a272ce5b592b8 +DATA ·IEEEConst+4248(SB)/8, $0xe3ab4f2ac0b95347 + +// x^640 mod p(x), x^608 mod p(x), x^576 mod p(x), x^544 mod p(x) +DATA ·IEEEConst+4256(SB)/8, $0x5705a9ca4721589f +DATA ·IEEEConst+4264(SB)/8, $0xaa2215ea329ecc11 + +// x^512 mod p(x), x^480 mod p(x), x^448 mod p(x), x^416 mod p(x) +DATA ·IEEEConst+4272(SB)/8, $0xe3720acb88d14467 +DATA ·IEEEConst+4280(SB)/8, $0x1ed8f66ed95efd26 + +// x^384 mod p(x), x^352 mod p(x), x^320 mod p(x), x^288 mod p(x) +DATA ·IEEEConst+4288(SB)/8, $0xba1aca0315141c31 +DATA ·IEEEConst+4296(SB)/8, $0x78ed02d5a700e96a + +// x^256 mod p(x), x^224 mod p(x), x^192 mod p(x), x^160 mod p(x) +DATA ·IEEEConst+4304(SB)/8, $0xad2a31b3ed627dae +DATA ·IEEEConst+4312(SB)/8, $0xba8ccbe832b39da3 + +// x^128 mod p(x), x^96 mod p(x), x^64 mod p(x), x^32 mod p(x) +DATA ·IEEEConst+4320(SB)/8, $0x6655004fa06a2517 +DATA ·IEEEConst+4328(SB)/8, $0xedb88320b1e6b092 + +GLOBL ·IEEEConst(SB), RODATA, $4336 + +// Barrett constant m - (4^32)/n +DATA ·IEEEBarConst(SB)/8, $0x00000001f7011641 +DATA ·IEEEBarConst+8(SB)/8, $0x0000000000000000 +DATA ·IEEEBarConst+16(SB)/8, $0x00000001db710641 +DATA ·IEEEBarConst+24(SB)/8, $0x0000000000000000 +GLOBL ·IEEEBarConst(SB), RODATA, $32 + +// Reduce 262144 kbits to 1024 bits +// x^261184 mod p(x), x^261120 mod p(x) +DATA ·CastConst+0(SB)/8, $0x000000009c37c408 +DATA ·CastConst+8(SB)/8, $0x00000000b6ca9e20 + +// x^260160 mod p(x), x^260096 mod p(x) +DATA ·CastConst+16(SB)/8, $0x00000001b51df26c +DATA ·CastConst+24(SB)/8, $0x00000000350249a8 + +// x^259136 mod p(x), x^259072 mod p(x) +DATA ·CastConst+32(SB)/8, 
$0x000000000724b9d0 +DATA ·CastConst+40(SB)/8, $0x00000001862dac54 + +// x^258112 mod p(x), x^258048 mod p(x) +DATA ·CastConst+48(SB)/8, $0x00000001c00532fe +DATA ·CastConst+56(SB)/8, $0x00000001d87fb48c + +// x^257088 mod p(x), x^257024 mod p(x) +DATA ·CastConst+64(SB)/8, $0x00000000f05a9362 +DATA ·CastConst+72(SB)/8, $0x00000001f39b699e + +// x^256064 mod p(x), x^256000 mod p(x) +DATA ·CastConst+80(SB)/8, $0x00000001e1007970 +DATA ·CastConst+88(SB)/8, $0x0000000101da11b4 + +// x^255040 mod p(x), x^254976 mod p(x) +DATA ·CastConst+96(SB)/8, $0x00000000a57366ee +DATA ·CastConst+104(SB)/8, $0x00000001cab571e0 + +// x^254016 mod p(x), x^253952 mod p(x) +DATA ·CastConst+112(SB)/8, $0x0000000192011284 +DATA ·CastConst+120(SB)/8, $0x00000000c7020cfe + +// x^252992 mod p(x), x^252928 mod p(x) +DATA ·CastConst+128(SB)/8, $0x0000000162716d9a +DATA ·CastConst+136(SB)/8, $0x00000000cdaed1ae + +// x^251968 mod p(x), x^251904 mod p(x) +DATA ·CastConst+144(SB)/8, $0x00000000cd97ecde +DATA ·CastConst+152(SB)/8, $0x00000001e804effc + +// x^250944 mod p(x), x^250880 mod p(x) +DATA ·CastConst+160(SB)/8, $0x0000000058812bc0 +DATA ·CastConst+168(SB)/8, $0x0000000077c3ea3a + +// x^249920 mod p(x), x^249856 mod p(x) +DATA ·CastConst+176(SB)/8, $0x0000000088b8c12e +DATA ·CastConst+184(SB)/8, $0x0000000068df31b4 + +// x^248896 mod p(x), x^248832 mod p(x) +DATA ·CastConst+192(SB)/8, $0x00000001230b234c +DATA ·CastConst+200(SB)/8, $0x00000000b059b6c2 + +// x^247872 mod p(x), x^247808 mod p(x) +DATA ·CastConst+208(SB)/8, $0x00000001120b416e +DATA ·CastConst+216(SB)/8, $0x0000000145fb8ed8 + +// x^246848 mod p(x), x^246784 mod p(x) +DATA ·CastConst+224(SB)/8, $0x00000001974aecb0 +DATA ·CastConst+232(SB)/8, $0x00000000cbc09168 + +// x^245824 mod p(x), x^245760 mod p(x) +DATA ·CastConst+240(SB)/8, $0x000000008ee3f226 +DATA ·CastConst+248(SB)/8, $0x000000005ceeedc2 + +// x^244800 mod p(x), x^244736 mod p(x) +DATA ·CastConst+256(SB)/8, $0x00000001089aba9a +DATA ·CastConst+264(SB)/8, 
$0x0000000047d74e86 + +// x^243776 mod p(x), x^243712 mod p(x) +DATA ·CastConst+272(SB)/8, $0x0000000065113872 +DATA ·CastConst+280(SB)/8, $0x00000001407e9e22 + +// x^242752 mod p(x), x^242688 mod p(x) +DATA ·CastConst+288(SB)/8, $0x000000005c07ec10 +DATA ·CastConst+296(SB)/8, $0x00000001da967bda + +// x^241728 mod p(x), x^241664 mod p(x) +DATA ·CastConst+304(SB)/8, $0x0000000187590924 +DATA ·CastConst+312(SB)/8, $0x000000006c898368 + +// x^240704 mod p(x), x^240640 mod p(x) +DATA ·CastConst+320(SB)/8, $0x00000000e35da7c6 +DATA ·CastConst+328(SB)/8, $0x00000000f2d14c98 + +// x^239680 mod p(x), x^239616 mod p(x) +DATA ·CastConst+336(SB)/8, $0x000000000415855a +DATA ·CastConst+344(SB)/8, $0x00000001993c6ad4 + +// x^238656 mod p(x), x^238592 mod p(x) +DATA ·CastConst+352(SB)/8, $0x0000000073617758 +DATA ·CastConst+360(SB)/8, $0x000000014683d1ac + +// x^237632 mod p(x), x^237568 mod p(x) +DATA ·CastConst+368(SB)/8, $0x0000000176021d28 +DATA ·CastConst+376(SB)/8, $0x00000001a7c93e6c + +// x^236608 mod p(x), x^236544 mod p(x) +DATA ·CastConst+384(SB)/8, $0x00000001c358fd0a +DATA ·CastConst+392(SB)/8, $0x000000010211e90a + +// x^235584 mod p(x), x^235520 mod p(x) +DATA ·CastConst+400(SB)/8, $0x00000001ff7a2c18 +DATA ·CastConst+408(SB)/8, $0x000000001119403e + +// x^234560 mod p(x), x^234496 mod p(x) +DATA ·CastConst+416(SB)/8, $0x00000000f2d9f7e4 +DATA ·CastConst+424(SB)/8, $0x000000001c3261aa + +// x^233536 mod p(x), x^233472 mod p(x) +DATA ·CastConst+432(SB)/8, $0x000000016cf1f9c8 +DATA ·CastConst+440(SB)/8, $0x000000014e37a634 + +// x^232512 mod p(x), x^232448 mod p(x) +DATA ·CastConst+448(SB)/8, $0x000000010af9279a +DATA ·CastConst+456(SB)/8, $0x0000000073786c0c + +// x^231488 mod p(x), x^231424 mod p(x) +DATA ·CastConst+464(SB)/8, $0x0000000004f101e8 +DATA ·CastConst+472(SB)/8, $0x000000011dc037f8 + +// x^230464 mod p(x), x^230400 mod p(x) +DATA ·CastConst+480(SB)/8, $0x0000000070bcf184 +DATA ·CastConst+488(SB)/8, $0x0000000031433dfc + +// x^229440 mod p(x), x^229376 
mod p(x) +DATA ·CastConst+496(SB)/8, $0x000000000a8de642 +DATA ·CastConst+504(SB)/8, $0x000000009cde8348 + +// x^228416 mod p(x), x^228352 mod p(x) +DATA ·CastConst+512(SB)/8, $0x0000000062ea130c +DATA ·CastConst+520(SB)/8, $0x0000000038d3c2a6 + +// x^227392 mod p(x), x^227328 mod p(x) +DATA ·CastConst+528(SB)/8, $0x00000001eb31cbb2 +DATA ·CastConst+536(SB)/8, $0x000000011b25f260 + +// x^226368 mod p(x), x^226304 mod p(x) +DATA ·CastConst+544(SB)/8, $0x0000000170783448 +DATA ·CastConst+552(SB)/8, $0x000000001629e6f0 + +// x^225344 mod p(x), x^225280 mod p(x) +DATA ·CastConst+560(SB)/8, $0x00000001a684b4c6 +DATA ·CastConst+568(SB)/8, $0x0000000160838b4c + +// x^224320 mod p(x), x^224256 mod p(x) +DATA ·CastConst+576(SB)/8, $0x00000000253ca5b4 +DATA ·CastConst+584(SB)/8, $0x000000007a44011c + +// x^223296 mod p(x), x^223232 mod p(x) +DATA ·CastConst+592(SB)/8, $0x0000000057b4b1e2 +DATA ·CastConst+600(SB)/8, $0x00000000226f417a + +// x^222272 mod p(x), x^222208 mod p(x) +DATA ·CastConst+608(SB)/8, $0x00000000b6bd084c +DATA ·CastConst+616(SB)/8, $0x0000000045eb2eb4 + +// x^221248 mod p(x), x^221184 mod p(x) +DATA ·CastConst+624(SB)/8, $0x0000000123c2d592 +DATA ·CastConst+632(SB)/8, $0x000000014459d70c + +// x^220224 mod p(x), x^220160 mod p(x) +DATA ·CastConst+640(SB)/8, $0x00000000159dafce +DATA ·CastConst+648(SB)/8, $0x00000001d406ed82 + +// x^219200 mod p(x), x^219136 mod p(x) +DATA ·CastConst+656(SB)/8, $0x0000000127e1a64e +DATA ·CastConst+664(SB)/8, $0x0000000160c8e1a8 + +// x^218176 mod p(x), x^218112 mod p(x) +DATA ·CastConst+672(SB)/8, $0x0000000056860754 +DATA ·CastConst+680(SB)/8, $0x0000000027ba8098 + +// x^217152 mod p(x), x^217088 mod p(x) +DATA ·CastConst+688(SB)/8, $0x00000001e661aae8 +DATA ·CastConst+696(SB)/8, $0x000000006d92d018 + +// x^216128 mod p(x), x^216064 mod p(x) +DATA ·CastConst+704(SB)/8, $0x00000000f82c6166 +DATA ·CastConst+712(SB)/8, $0x000000012ed7e3f2 + +// x^215104 mod p(x), x^215040 mod p(x) +DATA ·CastConst+720(SB)/8, 
$0x00000000c4f9c7ae +DATA ·CastConst+728(SB)/8, $0x000000002dc87788 + +// x^214080 mod p(x), x^214016 mod p(x) +DATA ·CastConst+736(SB)/8, $0x0000000074203d20 +DATA ·CastConst+744(SB)/8, $0x0000000018240bb8 + +// x^213056 mod p(x), x^212992 mod p(x) +DATA ·CastConst+752(SB)/8, $0x0000000198173052 +DATA ·CastConst+760(SB)/8, $0x000000001ad38158 + +// x^212032 mod p(x), x^211968 mod p(x) +DATA ·CastConst+768(SB)/8, $0x00000001ce8aba54 +DATA ·CastConst+776(SB)/8, $0x00000001396b78f2 + +// x^211008 mod p(x), x^210944 mod p(x) +DATA ·CastConst+784(SB)/8, $0x00000001850d5d94 +DATA ·CastConst+792(SB)/8, $0x000000011a681334 + +// x^209984 mod p(x), x^209920 mod p(x) +DATA ·CastConst+800(SB)/8, $0x00000001d609239c +DATA ·CastConst+808(SB)/8, $0x000000012104732e + +// x^208960 mod p(x), x^208896 mod p(x) +DATA ·CastConst+816(SB)/8, $0x000000001595f048 +DATA ·CastConst+824(SB)/8, $0x00000000a140d90c + +// x^207936 mod p(x), x^207872 mod p(x) +DATA ·CastConst+832(SB)/8, $0x0000000042ccee08 +DATA ·CastConst+840(SB)/8, $0x00000001b7215eda + +// x^206912 mod p(x), x^206848 mod p(x) +DATA ·CastConst+848(SB)/8, $0x000000010a389d74 +DATA ·CastConst+856(SB)/8, $0x00000001aaf1df3c + +// x^205888 mod p(x), x^205824 mod p(x) +DATA ·CastConst+864(SB)/8, $0x000000012a840da6 +DATA ·CastConst+872(SB)/8, $0x0000000029d15b8a + +// x^204864 mod p(x), x^204800 mod p(x) +DATA ·CastConst+880(SB)/8, $0x000000001d181c0c +DATA ·CastConst+888(SB)/8, $0x00000000f1a96922 + +// x^203840 mod p(x), x^203776 mod p(x) +DATA ·CastConst+896(SB)/8, $0x0000000068b7d1f6 +DATA ·CastConst+904(SB)/8, $0x00000001ac80d03c + +// x^202816 mod p(x), x^202752 mod p(x) +DATA ·CastConst+912(SB)/8, $0x000000005b0f14fc +DATA ·CastConst+920(SB)/8, $0x000000000f11d56a + +// x^201792 mod p(x), x^201728 mod p(x) +DATA ·CastConst+928(SB)/8, $0x0000000179e9e730 +DATA ·CastConst+936(SB)/8, $0x00000001f1c022a2 + +// x^200768 mod p(x), x^200704 mod p(x) +DATA ·CastConst+944(SB)/8, $0x00000001ce1368d6 +DATA ·CastConst+952(SB)/8, 
$0x0000000173d00ae2 + +// x^199744 mod p(x), x^199680 mod p(x) +DATA ·CastConst+960(SB)/8, $0x0000000112c3a84c +DATA ·CastConst+968(SB)/8, $0x00000001d4ffe4ac + +// x^198720 mod p(x), x^198656 mod p(x) +DATA ·CastConst+976(SB)/8, $0x00000000de940fee +DATA ·CastConst+984(SB)/8, $0x000000016edc5ae4 + +// x^197696 mod p(x), x^197632 mod p(x) +DATA ·CastConst+992(SB)/8, $0x00000000fe896b7e +DATA ·CastConst+1000(SB)/8, $0x00000001f1a02140 + +// x^196672 mod p(x), x^196608 mod p(x) +DATA ·CastConst+1008(SB)/8, $0x00000001f797431c +DATA ·CastConst+1016(SB)/8, $0x00000000ca0b28a0 + +// x^195648 mod p(x), x^195584 mod p(x) +DATA ·CastConst+1024(SB)/8, $0x0000000053e989ba +DATA ·CastConst+1032(SB)/8, $0x00000001928e30a2 + +// x^194624 mod p(x), x^194560 mod p(x) +DATA ·CastConst+1040(SB)/8, $0x000000003920cd16 +DATA ·CastConst+1048(SB)/8, $0x0000000097b1b002 + +// x^193600 mod p(x), x^193536 mod p(x) +DATA ·CastConst+1056(SB)/8, $0x00000001e6f579b8 +DATA ·CastConst+1064(SB)/8, $0x00000000b15bf906 + +// x^192576 mod p(x), x^192512 mod p(x) +DATA ·CastConst+1072(SB)/8, $0x000000007493cb0a +DATA ·CastConst+1080(SB)/8, $0x00000000411c5d52 + +// x^191552 mod p(x), x^191488 mod p(x) +DATA ·CastConst+1088(SB)/8, $0x00000001bdd376d8 +DATA ·CastConst+1096(SB)/8, $0x00000001c36f3300 + +// x^190528 mod p(x), x^190464 mod p(x) +DATA ·CastConst+1104(SB)/8, $0x000000016badfee6 +DATA ·CastConst+1112(SB)/8, $0x00000001119227e0 + +// x^189504 mod p(x), x^189440 mod p(x) +DATA ·CastConst+1120(SB)/8, $0x0000000071de5c58 +DATA ·CastConst+1128(SB)/8, $0x00000000114d4702 + +// x^188480 mod p(x), x^188416 mod p(x) +DATA ·CastConst+1136(SB)/8, $0x00000000453f317c +DATA ·CastConst+1144(SB)/8, $0x00000000458b5b98 + +// x^187456 mod p(x), x^187392 mod p(x) +DATA ·CastConst+1152(SB)/8, $0x0000000121675cce +DATA ·CastConst+1160(SB)/8, $0x000000012e31fb8e + +// x^186432 mod p(x), x^186368 mod p(x) +DATA ·CastConst+1168(SB)/8, $0x00000001f409ee92 +DATA ·CastConst+1176(SB)/8, $0x000000005cf619d8 + +// 
x^185408 mod p(x), x^185344 mod p(x) +DATA ·CastConst+1184(SB)/8, $0x00000000f36b9c88 +DATA ·CastConst+1192(SB)/8, $0x0000000063f4d8b2 + +// x^184384 mod p(x), x^184320 mod p(x) +DATA ·CastConst+1200(SB)/8, $0x0000000036b398f4 +DATA ·CastConst+1208(SB)/8, $0x000000004138dc8a + +// x^183360 mod p(x), x^183296 mod p(x) +DATA ·CastConst+1216(SB)/8, $0x00000001748f9adc +DATA ·CastConst+1224(SB)/8, $0x00000001d29ee8e0 + +// x^182336 mod p(x), x^182272 mod p(x) +DATA ·CastConst+1232(SB)/8, $0x00000001be94ec00 +DATA ·CastConst+1240(SB)/8, $0x000000006a08ace8 + +// x^181312 mod p(x), x^181248 mod p(x) +DATA ·CastConst+1248(SB)/8, $0x00000000b74370d6 +DATA ·CastConst+1256(SB)/8, $0x0000000127d42010 + +// x^180288 mod p(x), x^180224 mod p(x) +DATA ·CastConst+1264(SB)/8, $0x00000001174d0b98 +DATA ·CastConst+1272(SB)/8, $0x0000000019d76b62 + +// x^179264 mod p(x), x^179200 mod p(x) +DATA ·CastConst+1280(SB)/8, $0x00000000befc06a4 +DATA ·CastConst+1288(SB)/8, $0x00000001b1471f6e + +// x^178240 mod p(x), x^178176 mod p(x) +DATA ·CastConst+1296(SB)/8, $0x00000001ae125288 +DATA ·CastConst+1304(SB)/8, $0x00000001f64c19cc + +// x^177216 mod p(x), x^177152 mod p(x) +DATA ·CastConst+1312(SB)/8, $0x0000000095c19b34 +DATA ·CastConst+1320(SB)/8, $0x00000000003c0ea0 + +// x^176192 mod p(x), x^176128 mod p(x) +DATA ·CastConst+1328(SB)/8, $0x00000001a78496f2 +DATA ·CastConst+1336(SB)/8, $0x000000014d73abf6 + +// x^175168 mod p(x), x^175104 mod p(x) +DATA ·CastConst+1344(SB)/8, $0x00000001ac5390a0 +DATA ·CastConst+1352(SB)/8, $0x00000001620eb844 + +// x^174144 mod p(x), x^174080 mod p(x) +DATA ·CastConst+1360(SB)/8, $0x000000002a80ed6e +DATA ·CastConst+1368(SB)/8, $0x0000000147655048 + +// x^173120 mod p(x), x^173056 mod p(x) +DATA ·CastConst+1376(SB)/8, $0x00000001fa9b0128 +DATA ·CastConst+1384(SB)/8, $0x0000000067b5077e + +// x^172096 mod p(x), x^172032 mod p(x) +DATA ·CastConst+1392(SB)/8, $0x00000001ea94929e +DATA ·CastConst+1400(SB)/8, $0x0000000010ffe206 + +// x^171072 mod p(x), 
x^171008 mod p(x) +DATA ·CastConst+1408(SB)/8, $0x0000000125f4305c +DATA ·CastConst+1416(SB)/8, $0x000000000fee8f1e + +// x^170048 mod p(x), x^169984 mod p(x) +DATA ·CastConst+1424(SB)/8, $0x00000001471e2002 +DATA ·CastConst+1432(SB)/8, $0x00000001da26fbae + +// x^169024 mod p(x), x^168960 mod p(x) +DATA ·CastConst+1440(SB)/8, $0x0000000132d2253a +DATA ·CastConst+1448(SB)/8, $0x00000001b3a8bd88 + +// x^168000 mod p(x), x^167936 mod p(x) +DATA ·CastConst+1456(SB)/8, $0x00000000f26b3592 +DATA ·CastConst+1464(SB)/8, $0x00000000e8f3898e + +// x^166976 mod p(x), x^166912 mod p(x) +DATA ·CastConst+1472(SB)/8, $0x00000000bc8b67b0 +DATA ·CastConst+1480(SB)/8, $0x00000000b0d0d28c + +// x^165952 mod p(x), x^165888 mod p(x) +DATA ·CastConst+1488(SB)/8, $0x000000013a826ef2 +DATA ·CastConst+1496(SB)/8, $0x0000000030f2a798 + +// x^164928 mod p(x), x^164864 mod p(x) +DATA ·CastConst+1504(SB)/8, $0x0000000081482c84 +DATA ·CastConst+1512(SB)/8, $0x000000000fba1002 + +// x^163904 mod p(x), x^163840 mod p(x) +DATA ·CastConst+1520(SB)/8, $0x00000000e77307c2 +DATA ·CastConst+1528(SB)/8, $0x00000000bdb9bd72 + +// x^162880 mod p(x), x^162816 mod p(x) +DATA ·CastConst+1536(SB)/8, $0x00000000d4a07ec8 +DATA ·CastConst+1544(SB)/8, $0x0000000075d3bf5a + +// x^161856 mod p(x), x^161792 mod p(x) +DATA ·CastConst+1552(SB)/8, $0x0000000017102100 +DATA ·CastConst+1560(SB)/8, $0x00000000ef1f98a0 + +// x^160832 mod p(x), x^160768 mod p(x) +DATA ·CastConst+1568(SB)/8, $0x00000000db406486 +DATA ·CastConst+1576(SB)/8, $0x00000000689c7602 + +// x^159808 mod p(x), x^159744 mod p(x) +DATA ·CastConst+1584(SB)/8, $0x0000000192db7f88 +DATA ·CastConst+1592(SB)/8, $0x000000016d5fa5fe + +// x^158784 mod p(x), x^158720 mod p(x) +DATA ·CastConst+1600(SB)/8, $0x000000018bf67b1e +DATA ·CastConst+1608(SB)/8, $0x00000001d0d2b9ca + +// x^157760 mod p(x), x^157696 mod p(x) +DATA ·CastConst+1616(SB)/8, $0x000000007c09163e +DATA ·CastConst+1624(SB)/8, $0x0000000041e7b470 + +// x^156736 mod p(x), x^156672 mod p(x) +DATA 
·CastConst+1632(SB)/8, $0x000000000adac060 +DATA ·CastConst+1640(SB)/8, $0x00000001cbb6495e + +// x^155712 mod p(x), x^155648 mod p(x) +DATA ·CastConst+1648(SB)/8, $0x00000000bd8316ae +DATA ·CastConst+1656(SB)/8, $0x000000010052a0b0 + +// x^154688 mod p(x), x^154624 mod p(x) +DATA ·CastConst+1664(SB)/8, $0x000000019f09ab54 +DATA ·CastConst+1672(SB)/8, $0x00000001d8effb5c + +// x^153664 mod p(x), x^153600 mod p(x) +DATA ·CastConst+1680(SB)/8, $0x0000000125155542 +DATA ·CastConst+1688(SB)/8, $0x00000001d969853c + +// x^152640 mod p(x), x^152576 mod p(x) +DATA ·CastConst+1696(SB)/8, $0x000000018fdb5882 +DATA ·CastConst+1704(SB)/8, $0x00000000523ccce2 + +// x^151616 mod p(x), x^151552 mod p(x) +DATA ·CastConst+1712(SB)/8, $0x00000000e794b3f4 +DATA ·CastConst+1720(SB)/8, $0x000000001e2436bc + +// x^150592 mod p(x), x^150528 mod p(x) +DATA ·CastConst+1728(SB)/8, $0x000000016f9bb022 +DATA ·CastConst+1736(SB)/8, $0x00000000ddd1c3a2 + +// x^149568 mod p(x), x^149504 mod p(x) +DATA ·CastConst+1744(SB)/8, $0x00000000290c9978 +DATA ·CastConst+1752(SB)/8, $0x0000000019fcfe38 + +// x^148544 mod p(x), x^148480 mod p(x) +DATA ·CastConst+1760(SB)/8, $0x0000000083c0f350 +DATA ·CastConst+1768(SB)/8, $0x00000001ce95db64 + +// x^147520 mod p(x), x^147456 mod p(x) +DATA ·CastConst+1776(SB)/8, $0x0000000173ea6628 +DATA ·CastConst+1784(SB)/8, $0x00000000af582806 + +// x^146496 mod p(x), x^146432 mod p(x) +DATA ·CastConst+1792(SB)/8, $0x00000001c8b4e00a +DATA ·CastConst+1800(SB)/8, $0x00000001006388f6 + +// x^145472 mod p(x), x^145408 mod p(x) +DATA ·CastConst+1808(SB)/8, $0x00000000de95d6aa +DATA ·CastConst+1816(SB)/8, $0x0000000179eca00a + +// x^144448 mod p(x), x^144384 mod p(x) +DATA ·CastConst+1824(SB)/8, $0x000000010b7f7248 +DATA ·CastConst+1832(SB)/8, $0x0000000122410a6a + +// x^143424 mod p(x), x^143360 mod p(x) +DATA ·CastConst+1840(SB)/8, $0x00000001326e3a06 +DATA ·CastConst+1848(SB)/8, $0x000000004288e87c + +// x^142400 mod p(x), x^142336 mod p(x) +DATA ·CastConst+1856(SB)/8, 
$0x00000000bb62c2e6 +DATA ·CastConst+1864(SB)/8, $0x000000016c5490da + +// x^141376 mod p(x), x^141312 mod p(x) +DATA ·CastConst+1872(SB)/8, $0x0000000156a4b2c2 +DATA ·CastConst+1880(SB)/8, $0x00000000d1c71f6e + +// x^140352 mod p(x), x^140288 mod p(x) +DATA ·CastConst+1888(SB)/8, $0x000000011dfe763a +DATA ·CastConst+1896(SB)/8, $0x00000001b4ce08a6 + +// x^139328 mod p(x), x^139264 mod p(x) +DATA ·CastConst+1904(SB)/8, $0x000000007bcca8e2 +DATA ·CastConst+1912(SB)/8, $0x00000001466ba60c + +// x^138304 mod p(x), x^138240 mod p(x) +DATA ·CastConst+1920(SB)/8, $0x0000000186118faa +DATA ·CastConst+1928(SB)/8, $0x00000001f6c488a4 + +// x^137280 mod p(x), x^137216 mod p(x) +DATA ·CastConst+1936(SB)/8, $0x0000000111a65a88 +DATA ·CastConst+1944(SB)/8, $0x000000013bfb0682 + +// x^136256 mod p(x), x^136192 mod p(x) +DATA ·CastConst+1952(SB)/8, $0x000000003565e1c4 +DATA ·CastConst+1960(SB)/8, $0x00000000690e9e54 + +// x^135232 mod p(x), x^135168 mod p(x) +DATA ·CastConst+1968(SB)/8, $0x000000012ed02a82 +DATA ·CastConst+1976(SB)/8, $0x00000000281346b6 + +// x^134208 mod p(x), x^134144 mod p(x) +DATA ·CastConst+1984(SB)/8, $0x00000000c486ecfc +DATA ·CastConst+1992(SB)/8, $0x0000000156464024 + +// x^133184 mod p(x), x^133120 mod p(x) +DATA ·CastConst+2000(SB)/8, $0x0000000001b951b2 +DATA ·CastConst+2008(SB)/8, $0x000000016063a8dc + +// x^132160 mod p(x), x^132096 mod p(x) +DATA ·CastConst+2016(SB)/8, $0x0000000048143916 +DATA ·CastConst+2024(SB)/8, $0x0000000116a66362 + +// x^131136 mod p(x), x^131072 mod p(x) +DATA ·CastConst+2032(SB)/8, $0x00000001dc2ae124 +DATA ·CastConst+2040(SB)/8, $0x000000017e8aa4d2 + +// x^130112 mod p(x), x^130048 mod p(x) +DATA ·CastConst+2048(SB)/8, $0x00000001416c58d6 +DATA ·CastConst+2056(SB)/8, $0x00000001728eb10c + +// x^129088 mod p(x), x^129024 mod p(x) +DATA ·CastConst+2064(SB)/8, $0x00000000a479744a +DATA ·CastConst+2072(SB)/8, $0x00000001b08fd7fa + +// x^128064 mod p(x), x^128000 mod p(x) +DATA ·CastConst+2080(SB)/8, $0x0000000096ca3a26 +DATA 
·CastConst+2088(SB)/8, $0x00000001092a16e8 + +// x^127040 mod p(x), x^126976 mod p(x) +DATA ·CastConst+2096(SB)/8, $0x00000000ff223d4e +DATA ·CastConst+2104(SB)/8, $0x00000000a505637c + +// x^126016 mod p(x), x^125952 mod p(x) +DATA ·CastConst+2112(SB)/8, $0x000000010e84da42 +DATA ·CastConst+2120(SB)/8, $0x00000000d94869b2 + +// x^124992 mod p(x), x^124928 mod p(x) +DATA ·CastConst+2128(SB)/8, $0x00000001b61ba3d0 +DATA ·CastConst+2136(SB)/8, $0x00000001c8b203ae + +// x^123968 mod p(x), x^123904 mod p(x) +DATA ·CastConst+2144(SB)/8, $0x00000000680f2de8 +DATA ·CastConst+2152(SB)/8, $0x000000005704aea0 + +// x^122944 mod p(x), x^122880 mod p(x) +DATA ·CastConst+2160(SB)/8, $0x000000008772a9a8 +DATA ·CastConst+2168(SB)/8, $0x000000012e295fa2 + +// x^121920 mod p(x), x^121856 mod p(x) +DATA ·CastConst+2176(SB)/8, $0x0000000155f295bc +DATA ·CastConst+2184(SB)/8, $0x000000011d0908bc + +// x^120896 mod p(x), x^120832 mod p(x) +DATA ·CastConst+2192(SB)/8, $0x00000000595f9282 +DATA ·CastConst+2200(SB)/8, $0x0000000193ed97ea + +// x^119872 mod p(x), x^119808 mod p(x) +DATA ·CastConst+2208(SB)/8, $0x0000000164b1c25a +DATA ·CastConst+2216(SB)/8, $0x000000013a0f1c52 + +// x^118848 mod p(x), x^118784 mod p(x) +DATA ·CastConst+2224(SB)/8, $0x00000000fbd67c50 +DATA ·CastConst+2232(SB)/8, $0x000000010c2c40c0 + +// x^117824 mod p(x), x^117760 mod p(x) +DATA ·CastConst+2240(SB)/8, $0x0000000096076268 +DATA ·CastConst+2248(SB)/8, $0x00000000ff6fac3e + +// x^116800 mod p(x), x^116736 mod p(x) +DATA ·CastConst+2256(SB)/8, $0x00000001d288e4cc +DATA ·CastConst+2264(SB)/8, $0x000000017b3609c0 + +// x^115776 mod p(x), x^115712 mod p(x) +DATA ·CastConst+2272(SB)/8, $0x00000001eaac1bdc +DATA ·CastConst+2280(SB)/8, $0x0000000088c8c922 + +// x^114752 mod p(x), x^114688 mod p(x) +DATA ·CastConst+2288(SB)/8, $0x00000001f1ea39e2 +DATA ·CastConst+2296(SB)/8, $0x00000001751baae6 + +// x^113728 mod p(x), x^113664 mod p(x) +DATA ·CastConst+2304(SB)/8, $0x00000001eb6506fc +DATA ·CastConst+2312(SB)/8, 
$0x0000000107952972 + +// x^112704 mod p(x), x^112640 mod p(x) +DATA ·CastConst+2320(SB)/8, $0x000000010f806ffe +DATA ·CastConst+2328(SB)/8, $0x0000000162b00abe + +// x^111680 mod p(x), x^111616 mod p(x) +DATA ·CastConst+2336(SB)/8, $0x000000010408481e +DATA ·CastConst+2344(SB)/8, $0x000000000d7b404c + +// x^110656 mod p(x), x^110592 mod p(x) +DATA ·CastConst+2352(SB)/8, $0x0000000188260534 +DATA ·CastConst+2360(SB)/8, $0x00000000763b13d4 + +// x^109632 mod p(x), x^109568 mod p(x) +DATA ·CastConst+2368(SB)/8, $0x0000000058fc73e0 +DATA ·CastConst+2376(SB)/8, $0x00000000f6dc22d8 + +// x^108608 mod p(x), x^108544 mod p(x) +DATA ·CastConst+2384(SB)/8, $0x00000000391c59b8 +DATA ·CastConst+2392(SB)/8, $0x000000007daae060 + +// x^107584 mod p(x), x^107520 mod p(x) +DATA ·CastConst+2400(SB)/8, $0x000000018b638400 +DATA ·CastConst+2408(SB)/8, $0x000000013359ab7c + +// x^106560 mod p(x), x^106496 mod p(x) +DATA ·CastConst+2416(SB)/8, $0x000000011738f5c4 +DATA ·CastConst+2424(SB)/8, $0x000000008add438a + +// x^105536 mod p(x), x^105472 mod p(x) +DATA ·CastConst+2432(SB)/8, $0x000000008cf7c6da +DATA ·CastConst+2440(SB)/8, $0x00000001edbefdea + +// x^104512 mod p(x), x^104448 mod p(x) +DATA ·CastConst+2448(SB)/8, $0x00000001ef97fb16 +DATA ·CastConst+2456(SB)/8, $0x000000004104e0f8 + +// x^103488 mod p(x), x^103424 mod p(x) +DATA ·CastConst+2464(SB)/8, $0x0000000102130e20 +DATA ·CastConst+2472(SB)/8, $0x00000000b48a8222 + +// x^102464 mod p(x), x^102400 mod p(x) +DATA ·CastConst+2480(SB)/8, $0x00000000db968898 +DATA ·CastConst+2488(SB)/8, $0x00000001bcb46844 + +// x^101440 mod p(x), x^101376 mod p(x) +DATA ·CastConst+2496(SB)/8, $0x00000000b5047b5e +DATA ·CastConst+2504(SB)/8, $0x000000013293ce0a + +// x^100416 mod p(x), x^100352 mod p(x) +DATA ·CastConst+2512(SB)/8, $0x000000010b90fdb2 +DATA ·CastConst+2520(SB)/8, $0x00000001710d0844 + +// x^99392 mod p(x), x^99328 mod p(x) +DATA ·CastConst+2528(SB)/8, $0x000000004834a32e +DATA ·CastConst+2536(SB)/8, $0x0000000117907f6e + +// 
x^98368 mod p(x), x^98304 mod p(x) +DATA ·CastConst+2544(SB)/8, $0x0000000059c8f2b0 +DATA ·CastConst+2552(SB)/8, $0x0000000087ddf93e + +// x^97344 mod p(x), x^97280 mod p(x) +DATA ·CastConst+2560(SB)/8, $0x0000000122cec508 +DATA ·CastConst+2568(SB)/8, $0x000000005970e9b0 + +// x^96320 mod p(x), x^96256 mod p(x) +DATA ·CastConst+2576(SB)/8, $0x000000000a330cda +DATA ·CastConst+2584(SB)/8, $0x0000000185b2b7d0 + +// x^95296 mod p(x), x^95232 mod p(x) +DATA ·CastConst+2592(SB)/8, $0x000000014a47148c +DATA ·CastConst+2600(SB)/8, $0x00000001dcee0efc + +// x^94272 mod p(x), x^94208 mod p(x) +DATA ·CastConst+2608(SB)/8, $0x0000000042c61cb8 +DATA ·CastConst+2616(SB)/8, $0x0000000030da2722 + +// x^93248 mod p(x), x^93184 mod p(x) +DATA ·CastConst+2624(SB)/8, $0x0000000012fe6960 +DATA ·CastConst+2632(SB)/8, $0x000000012f925a18 + +// x^92224 mod p(x), x^92160 mod p(x) +DATA ·CastConst+2640(SB)/8, $0x00000000dbda2c20 +DATA ·CastConst+2648(SB)/8, $0x00000000dd2e357c + +// x^91200 mod p(x), x^91136 mod p(x) +DATA ·CastConst+2656(SB)/8, $0x000000011122410c +DATA ·CastConst+2664(SB)/8, $0x00000000071c80de + +// x^90176 mod p(x), x^90112 mod p(x) +DATA ·CastConst+2672(SB)/8, $0x00000000977b2070 +DATA ·CastConst+2680(SB)/8, $0x000000011513140a + +// x^89152 mod p(x), x^89088 mod p(x) +DATA ·CastConst+2688(SB)/8, $0x000000014050438e +DATA ·CastConst+2696(SB)/8, $0x00000001df876e8e + +// x^88128 mod p(x), x^88064 mod p(x) +DATA ·CastConst+2704(SB)/8, $0x0000000147c840e8 +DATA ·CastConst+2712(SB)/8, $0x000000015f81d6ce + +// x^87104 mod p(x), x^87040 mod p(x) +DATA ·CastConst+2720(SB)/8, $0x00000001cc7c88ce +DATA ·CastConst+2728(SB)/8, $0x000000019dd94dbe + +// x^86080 mod p(x), x^86016 mod p(x) +DATA ·CastConst+2736(SB)/8, $0x00000001476b35a4 +DATA ·CastConst+2744(SB)/8, $0x00000001373d206e + +// x^85056 mod p(x), x^84992 mod p(x) +DATA ·CastConst+2752(SB)/8, $0x000000013d52d508 +DATA ·CastConst+2760(SB)/8, $0x00000000668ccade + +// x^84032 mod p(x), x^83968 mod p(x) +DATA 
·CastConst+2768(SB)/8, $0x000000008e4be32e +DATA ·CastConst+2776(SB)/8, $0x00000001b192d268 + +// x^83008 mod p(x), x^82944 mod p(x) +DATA ·CastConst+2784(SB)/8, $0x00000000024120fe +DATA ·CastConst+2792(SB)/8, $0x00000000e30f3a78 + +// x^81984 mod p(x), x^81920 mod p(x) +DATA ·CastConst+2800(SB)/8, $0x00000000ddecddb4 +DATA ·CastConst+2808(SB)/8, $0x000000010ef1f7bc + +// x^80960 mod p(x), x^80896 mod p(x) +DATA ·CastConst+2816(SB)/8, $0x00000000d4d403bc +DATA ·CastConst+2824(SB)/8, $0x00000001f5ac7380 + +// x^79936 mod p(x), x^79872 mod p(x) +DATA ·CastConst+2832(SB)/8, $0x00000001734b89aa +DATA ·CastConst+2840(SB)/8, $0x000000011822ea70 + +// x^78912 mod p(x), x^78848 mod p(x) +DATA ·CastConst+2848(SB)/8, $0x000000010e7a58d6 +DATA ·CastConst+2856(SB)/8, $0x00000000c3a33848 + +// x^77888 mod p(x), x^77824 mod p(x) +DATA ·CastConst+2864(SB)/8, $0x00000001f9f04e9c +DATA ·CastConst+2872(SB)/8, $0x00000001bd151c24 + +// x^76864 mod p(x), x^76800 mod p(x) +DATA ·CastConst+2880(SB)/8, $0x00000000b692225e +DATA ·CastConst+2888(SB)/8, $0x0000000056002d76 + +// x^75840 mod p(x), x^75776 mod p(x) +DATA ·CastConst+2896(SB)/8, $0x000000019b8d3f3e +DATA ·CastConst+2904(SB)/8, $0x000000014657c4f4 + +// x^74816 mod p(x), x^74752 mod p(x) +DATA ·CastConst+2912(SB)/8, $0x00000001a874f11e +DATA ·CastConst+2920(SB)/8, $0x0000000113742d7c + +// x^73792 mod p(x), x^73728 mod p(x) +DATA ·CastConst+2928(SB)/8, $0x000000010d5a4254 +DATA ·CastConst+2936(SB)/8, $0x000000019c5920ba + +// x^72768 mod p(x), x^72704 mod p(x) +DATA ·CastConst+2944(SB)/8, $0x00000000bbb2f5d6 +DATA ·CastConst+2952(SB)/8, $0x000000005216d2d6 + +// x^71744 mod p(x), x^71680 mod p(x) +DATA ·CastConst+2960(SB)/8, $0x0000000179cc0e36 +DATA ·CastConst+2968(SB)/8, $0x0000000136f5ad8a + +// x^70720 mod p(x), x^70656 mod p(x) +DATA ·CastConst+2976(SB)/8, $0x00000001dca1da4a +DATA ·CastConst+2984(SB)/8, $0x000000018b07beb6 + +// x^69696 mod p(x), x^69632 mod p(x) +DATA ·CastConst+2992(SB)/8, $0x00000000feb1a192 +DATA 
·CastConst+3000(SB)/8, $0x00000000db1e93b0 + +// x^68672 mod p(x), x^68608 mod p(x) +DATA ·CastConst+3008(SB)/8, $0x00000000d1eeedd6 +DATA ·CastConst+3016(SB)/8, $0x000000000b96fa3a + +// x^67648 mod p(x), x^67584 mod p(x) +DATA ·CastConst+3024(SB)/8, $0x000000008fad9bb4 +DATA ·CastConst+3032(SB)/8, $0x00000001d9968af0 + +// x^66624 mod p(x), x^66560 mod p(x) +DATA ·CastConst+3040(SB)/8, $0x00000001884938e4 +DATA ·CastConst+3048(SB)/8, $0x000000000e4a77a2 + +// x^65600 mod p(x), x^65536 mod p(x) +DATA ·CastConst+3056(SB)/8, $0x00000001bc2e9bc0 +DATA ·CastConst+3064(SB)/8, $0x00000000508c2ac8 + +// x^64576 mod p(x), x^64512 mod p(x) +DATA ·CastConst+3072(SB)/8, $0x00000001f9658a68 +DATA ·CastConst+3080(SB)/8, $0x0000000021572a80 + +// x^63552 mod p(x), x^63488 mod p(x) +DATA ·CastConst+3088(SB)/8, $0x000000001b9224fc +DATA ·CastConst+3096(SB)/8, $0x00000001b859daf2 + +// x^62528 mod p(x), x^62464 mod p(x) +DATA ·CastConst+3104(SB)/8, $0x0000000055b2fb84 +DATA ·CastConst+3112(SB)/8, $0x000000016f788474 + +// x^61504 mod p(x), x^61440 mod p(x) +DATA ·CastConst+3120(SB)/8, $0x000000018b090348 +DATA ·CastConst+3128(SB)/8, $0x00000001b438810e + +// x^60480 mod p(x), x^60416 mod p(x) +DATA ·CastConst+3136(SB)/8, $0x000000011ccbd5ea +DATA ·CastConst+3144(SB)/8, $0x0000000095ddc6f2 + +// x^59456 mod p(x), x^59392 mod p(x) +DATA ·CastConst+3152(SB)/8, $0x0000000007ae47f8 +DATA ·CastConst+3160(SB)/8, $0x00000001d977c20c + +// x^58432 mod p(x), x^58368 mod p(x) +DATA ·CastConst+3168(SB)/8, $0x0000000172acbec0 +DATA ·CastConst+3176(SB)/8, $0x00000000ebedb99a + +// x^57408 mod p(x), x^57344 mod p(x) +DATA ·CastConst+3184(SB)/8, $0x00000001c6e3ff20 +DATA ·CastConst+3192(SB)/8, $0x00000001df9e9e92 + +// x^56384 mod p(x), x^56320 mod p(x) +DATA ·CastConst+3200(SB)/8, $0x00000000e1b38744 +DATA ·CastConst+3208(SB)/8, $0x00000001a4a3f952 + +// x^55360 mod p(x), x^55296 mod p(x) +DATA ·CastConst+3216(SB)/8, $0x00000000791585b2 +DATA ·CastConst+3224(SB)/8, $0x00000000e2f51220 + +// 
x^54336 mod p(x), x^54272 mod p(x) +DATA ·CastConst+3232(SB)/8, $0x00000000ac53b894 +DATA ·CastConst+3240(SB)/8, $0x000000004aa01f3e + +// x^53312 mod p(x), x^53248 mod p(x) +DATA ·CastConst+3248(SB)/8, $0x00000001ed5f2cf4 +DATA ·CastConst+3256(SB)/8, $0x00000000b3e90a58 + +// x^52288 mod p(x), x^52224 mod p(x) +DATA ·CastConst+3264(SB)/8, $0x00000001df48b2e0 +DATA ·CastConst+3272(SB)/8, $0x000000000c9ca2aa + +// x^51264 mod p(x), x^51200 mod p(x) +DATA ·CastConst+3280(SB)/8, $0x00000000049c1c62 +DATA ·CastConst+3288(SB)/8, $0x0000000151682316 + +// x^50240 mod p(x), x^50176 mod p(x) +DATA ·CastConst+3296(SB)/8, $0x000000017c460c12 +DATA ·CastConst+3304(SB)/8, $0x0000000036fce78c + +// x^49216 mod p(x), x^49152 mod p(x) +DATA ·CastConst+3312(SB)/8, $0x000000015be4da7e +DATA ·CastConst+3320(SB)/8, $0x000000009037dc10 + +// x^48192 mod p(x), x^48128 mod p(x) +DATA ·CastConst+3328(SB)/8, $0x000000010f38f668 +DATA ·CastConst+3336(SB)/8, $0x00000000d3298582 + +// x^47168 mod p(x), x^47104 mod p(x) +DATA ·CastConst+3344(SB)/8, $0x0000000039f40a00 +DATA ·CastConst+3352(SB)/8, $0x00000001b42e8ad6 + +// x^46144 mod p(x), x^46080 mod p(x) +DATA ·CastConst+3360(SB)/8, $0x00000000bd4c10c4 +DATA ·CastConst+3368(SB)/8, $0x00000000142a9838 + +// x^45120 mod p(x), x^45056 mod p(x) +DATA ·CastConst+3376(SB)/8, $0x0000000042db1d98 +DATA ·CastConst+3384(SB)/8, $0x0000000109c7f190 + +// x^44096 mod p(x), x^44032 mod p(x) +DATA ·CastConst+3392(SB)/8, $0x00000001c905bae6 +DATA ·CastConst+3400(SB)/8, $0x0000000056ff9310 + +// x^43072 mod p(x), x^43008 mod p(x) +DATA ·CastConst+3408(SB)/8, $0x00000000069d40ea +DATA ·CastConst+3416(SB)/8, $0x00000001594513aa + +// x^42048 mod p(x), x^41984 mod p(x) +DATA ·CastConst+3424(SB)/8, $0x000000008e4fbad0 +DATA ·CastConst+3432(SB)/8, $0x00000001e3b5b1e8 + +// x^41024 mod p(x), x^40960 mod p(x) +DATA ·CastConst+3440(SB)/8, $0x0000000047bedd46 +DATA ·CastConst+3448(SB)/8, $0x000000011dd5fc08 + +// x^40000 mod p(x), x^39936 mod p(x) +DATA 
·CastConst+3456(SB)/8, $0x0000000026396bf8 +DATA ·CastConst+3464(SB)/8, $0x00000001675f0cc2 + +// x^38976 mod p(x), x^38912 mod p(x) +DATA ·CastConst+3472(SB)/8, $0x00000000379beb92 +DATA ·CastConst+3480(SB)/8, $0x00000000d1c8dd44 + +// x^37952 mod p(x), x^37888 mod p(x) +DATA ·CastConst+3488(SB)/8, $0x000000000abae54a +DATA ·CastConst+3496(SB)/8, $0x0000000115ebd3d8 + +// x^36928 mod p(x), x^36864 mod p(x) +DATA ·CastConst+3504(SB)/8, $0x0000000007e6a128 +DATA ·CastConst+3512(SB)/8, $0x00000001ecbd0dac + +// x^35904 mod p(x), x^35840 mod p(x) +DATA ·CastConst+3520(SB)/8, $0x000000000ade29d2 +DATA ·CastConst+3528(SB)/8, $0x00000000cdf67af2 + +// x^34880 mod p(x), x^34816 mod p(x) +DATA ·CastConst+3536(SB)/8, $0x00000000f974c45c +DATA ·CastConst+3544(SB)/8, $0x000000004c01ff4c + +// x^33856 mod p(x), x^33792 mod p(x) +DATA ·CastConst+3552(SB)/8, $0x00000000e77ac60a +DATA ·CastConst+3560(SB)/8, $0x00000000f2d8657e + +// x^32832 mod p(x), x^32768 mod p(x) +DATA ·CastConst+3568(SB)/8, $0x0000000145895816 +DATA ·CastConst+3576(SB)/8, $0x000000006bae74c4 + +// x^31808 mod p(x), x^31744 mod p(x) +DATA ·CastConst+3584(SB)/8, $0x0000000038e362be +DATA ·CastConst+3592(SB)/8, $0x0000000152af8aa0 + +// x^30784 mod p(x), x^30720 mod p(x) +DATA ·CastConst+3600(SB)/8, $0x000000007f991a64 +DATA ·CastConst+3608(SB)/8, $0x0000000004663802 + +// x^29760 mod p(x), x^29696 mod p(x) +DATA ·CastConst+3616(SB)/8, $0x00000000fa366d3a +DATA ·CastConst+3624(SB)/8, $0x00000001ab2f5afc + +// x^28736 mod p(x), x^28672 mod p(x) +DATA ·CastConst+3632(SB)/8, $0x00000001a2bb34f0 +DATA ·CastConst+3640(SB)/8, $0x0000000074a4ebd4 + +// x^27712 mod p(x), x^27648 mod p(x) +DATA ·CastConst+3648(SB)/8, $0x0000000028a9981e +DATA ·CastConst+3656(SB)/8, $0x00000001d7ab3a4c + +// x^26688 mod p(x), x^26624 mod p(x) +DATA ·CastConst+3664(SB)/8, $0x00000001dbc672be +DATA ·CastConst+3672(SB)/8, $0x00000001a8da60c6 + +// x^25664 mod p(x), x^25600 mod p(x) +DATA ·CastConst+3680(SB)/8, $0x00000000b04d77f6 +DATA 
·CastConst+3688(SB)/8, $0x000000013cf63820 + +// x^24640 mod p(x), x^24576 mod p(x) +DATA ·CastConst+3696(SB)/8, $0x0000000124400d96 +DATA ·CastConst+3704(SB)/8, $0x00000000bec12e1e + +// x^23616 mod p(x), x^23552 mod p(x) +DATA ·CastConst+3712(SB)/8, $0x000000014ca4b414 +DATA ·CastConst+3720(SB)/8, $0x00000001c6368010 + +// x^22592 mod p(x), x^22528 mod p(x) +DATA ·CastConst+3728(SB)/8, $0x000000012fe2c938 +DATA ·CastConst+3736(SB)/8, $0x00000001e6e78758 + +// x^21568 mod p(x), x^21504 mod p(x) +DATA ·CastConst+3744(SB)/8, $0x00000001faed01e6 +DATA ·CastConst+3752(SB)/8, $0x000000008d7f2b3c + +// x^20544 mod p(x), x^20480 mod p(x) +DATA ·CastConst+3760(SB)/8, $0x000000007e80ecfe +DATA ·CastConst+3768(SB)/8, $0x000000016b4a156e + +// x^19520 mod p(x), x^19456 mod p(x) +DATA ·CastConst+3776(SB)/8, $0x0000000098daee94 +DATA ·CastConst+3784(SB)/8, $0x00000001c63cfeb6 + +// x^18496 mod p(x), x^18432 mod p(x) +DATA ·CastConst+3792(SB)/8, $0x000000010a04edea +DATA ·CastConst+3800(SB)/8, $0x000000015f902670 + +// x^17472 mod p(x), x^17408 mod p(x) +DATA ·CastConst+3808(SB)/8, $0x00000001c00b4524 +DATA ·CastConst+3816(SB)/8, $0x00000001cd5de11e + +// x^16448 mod p(x), x^16384 mod p(x) +DATA ·CastConst+3824(SB)/8, $0x0000000170296550 +DATA ·CastConst+3832(SB)/8, $0x000000001acaec54 + +// x^15424 mod p(x), x^15360 mod p(x) +DATA ·CastConst+3840(SB)/8, $0x0000000181afaa48 +DATA ·CastConst+3848(SB)/8, $0x000000002bd0ca78 + +// x^14400 mod p(x), x^14336 mod p(x) +DATA ·CastConst+3856(SB)/8, $0x0000000185a31ffa +DATA ·CastConst+3864(SB)/8, $0x0000000032d63d5c + +// x^13376 mod p(x), x^13312 mod p(x) +DATA ·CastConst+3872(SB)/8, $0x000000002469f608 +DATA ·CastConst+3880(SB)/8, $0x000000001c6d4e4c + +// x^12352 mod p(x), x^12288 mod p(x) +DATA ·CastConst+3888(SB)/8, $0x000000006980102a +DATA ·CastConst+3896(SB)/8, $0x0000000106a60b92 + +// x^11328 mod p(x), x^11264 mod p(x) +DATA ·CastConst+3904(SB)/8, $0x0000000111ea9ca8 +DATA ·CastConst+3912(SB)/8, $0x00000000d3855e12 + +// 
x^10304 mod p(x), x^10240 mod p(x) +DATA ·CastConst+3920(SB)/8, $0x00000001bd1d29ce +DATA ·CastConst+3928(SB)/8, $0x00000000e3125636 + +// x^9280 mod p(x), x^9216 mod p(x) +DATA ·CastConst+3936(SB)/8, $0x00000001b34b9580 +DATA ·CastConst+3944(SB)/8, $0x000000009e8f7ea4 + +// x^8256 mod p(x), x^8192 mod p(x) +DATA ·CastConst+3952(SB)/8, $0x000000003076054e +DATA ·CastConst+3960(SB)/8, $0x00000001c82e562c + +// x^7232 mod p(x), x^7168 mod p(x) +DATA ·CastConst+3968(SB)/8, $0x000000012a608ea4 +DATA ·CastConst+3976(SB)/8, $0x00000000ca9f09ce + +// x^6208 mod p(x), x^6144 mod p(x) +DATA ·CastConst+3984(SB)/8, $0x00000000784d05fe +DATA ·CastConst+3992(SB)/8, $0x00000000c63764e6 + +// x^5184 mod p(x), x^5120 mod p(x) +DATA ·CastConst+4000(SB)/8, $0x000000016ef0d82a +DATA ·CastConst+4008(SB)/8, $0x0000000168d2e49e + +// x^4160 mod p(x), x^4096 mod p(x) +DATA ·CastConst+4016(SB)/8, $0x0000000075bda454 +DATA ·CastConst+4024(SB)/8, $0x00000000e986c148 + +// x^3136 mod p(x), x^3072 mod p(x) +DATA ·CastConst+4032(SB)/8, $0x000000003dc0a1c4 +DATA ·CastConst+4040(SB)/8, $0x00000000cfb65894 + +// x^2112 mod p(x), x^2048 mod p(x) +DATA ·CastConst+4048(SB)/8, $0x00000000e9a5d8be +DATA ·CastConst+4056(SB)/8, $0x0000000111cadee4 + +// x^1088 mod p(x), x^1024 mod p(x) +DATA ·CastConst+4064(SB)/8, $0x00000001609bc4b4 +DATA ·CastConst+4072(SB)/8, $0x0000000171fb63ce + +// x^2048 mod p(x), x^2016 mod p(x), x^1984 mod p(x), x^1952 mod p(x) +DATA ·CastConst+4080(SB)/8, $0x5cf015c388e56f72 +DATA ·CastConst+4088(SB)/8, $0x7fec2963e5bf8048 + +// x^1920 mod p(x), x^1888 mod p(x), x^1856 mod p(x), x^1824 mod p(x) +DATA ·CastConst+4096(SB)/8, $0x963a18920246e2e6 +DATA ·CastConst+4104(SB)/8, $0x38e888d4844752a9 + +// x^1792 mod p(x), x^1760 mod p(x), x^1728 mod p(x), x^1696 mod p(x) +DATA ·CastConst+4112(SB)/8, $0x419a441956993a31 +DATA ·CastConst+4120(SB)/8, $0x42316c00730206ad + +// x^1664 mod p(x), x^1632 mod p(x), x^1600 mod p(x), x^1568 mod p(x) +DATA ·CastConst+4128(SB)/8, 
$0x924752ba2b830011 +DATA ·CastConst+4136(SB)/8, $0x543d5c543e65ddf9 + +// x^1536 mod p(x), x^1504 mod p(x), x^1472 mod p(x), x^1440 mod p(x) +DATA ·CastConst+4144(SB)/8, $0x55bd7f9518e4a304 +DATA ·CastConst+4152(SB)/8, $0x78e87aaf56767c92 + +// x^1408 mod p(x), x^1376 mod p(x), x^1344 mod p(x), x^1312 mod p(x) +DATA ·CastConst+4160(SB)/8, $0x6d76739fe0553f1e +DATA ·CastConst+4168(SB)/8, $0x8f68fcec1903da7f + +// x^1280 mod p(x), x^1248 mod p(x), x^1216 mod p(x), x^1184 mod p(x) +DATA ·CastConst+4176(SB)/8, $0xc133722b1fe0b5c3 +DATA ·CastConst+4184(SB)/8, $0x3f4840246791d588 + +// x^1152 mod p(x), x^1120 mod p(x), x^1088 mod p(x), x^1056 mod p(x) +DATA ·CastConst+4192(SB)/8, $0x64b67ee0e55ef1f3 +DATA ·CastConst+4200(SB)/8, $0x34c96751b04de25a + +// x^1024 mod p(x), x^992 mod p(x), x^960 mod p(x), x^928 mod p(x) +DATA ·CastConst+4208(SB)/8, $0x069db049b8fdb1e7 +DATA ·CastConst+4216(SB)/8, $0x156c8e180b4a395b + +// x^896 mod p(x), x^864 mod p(x), x^832 mod p(x), x^800 mod p(x) +DATA ·CastConst+4224(SB)/8, $0xa11bfaf3c9e90b9e +DATA ·CastConst+4232(SB)/8, $0xe0b99ccbe661f7be + +// x^768 mod p(x), x^736 mod p(x), x^704 mod p(x), x^672 mod p(x) +DATA ·CastConst+4240(SB)/8, $0x817cdc5119b29a35 +DATA ·CastConst+4248(SB)/8, $0x041d37768cd75659 + +// x^640 mod p(x), x^608 mod p(x), x^576 mod p(x), x^544 mod p(x) +DATA ·CastConst+4256(SB)/8, $0x1ce9d94b36c41f1c +DATA ·CastConst+4264(SB)/8, $0x3a0777818cfaa965 + +// x^512 mod p(x), x^480 mod p(x), x^448 mod p(x), x^416 mod p(x) +DATA ·CastConst+4272(SB)/8, $0x4f256efcb82be955 +DATA ·CastConst+4280(SB)/8, $0x0e148e8252377a55 + +// x^384 mod p(x), x^352 mod p(x), x^320 mod p(x), x^288 mod p(x) +DATA ·CastConst+4288(SB)/8, $0xec1631edb2dea967 +DATA ·CastConst+4296(SB)/8, $0x9c25531d19e65dde + +// x^256 mod p(x), x^224 mod p(x), x^192 mod p(x), x^160 mod p(x) +DATA ·CastConst+4304(SB)/8, $0x5d27e147510ac59a +DATA ·CastConst+4312(SB)/8, $0x790606ff9957c0a6 + +// x^128 mod p(x), x^96 mod p(x), x^64 mod p(x), x^32 mod p(x) +DATA 
·CastConst+4320(SB)/8, $0xa66805eb18b8ea18 +DATA ·CastConst+4328(SB)/8, $0x82f63b786ea2d55c + +GLOBL ·CastConst(SB), RODATA, $4336 + +// Barrett constant m - (4^32)/n +DATA ·CastBarConst(SB)/8, $0x00000000dea713f1 +DATA ·CastBarConst+8(SB)/8, $0x0000000000000000 +DATA ·CastBarConst+16(SB)/8, $0x0000000105ec76f1 +DATA ·CastBarConst+24(SB)/8, $0x0000000000000000 +GLOBL ·CastBarConst(SB), RODATA, $32 + +// Reduce 262144 kbits to 1024 bits +// x^261184 mod p(x), x^261120 mod p(x) +DATA ·KoopConst+0(SB)/8, $0x00000000d72535b2 +DATA ·KoopConst+8(SB)/8, $0x000000007fd74916 + +// x^260160 mod p(x), x^260096 mod p(x) +DATA ·KoopConst+16(SB)/8, $0x0000000118a2a1b4 +DATA ·KoopConst+24(SB)/8, $0x000000010e944b56 + +// x^259136 mod p(x), x^259072 mod p(x) +DATA ·KoopConst+32(SB)/8, $0x0000000147b5c49c +DATA ·KoopConst+40(SB)/8, $0x00000000bfe71c20 + +// x^258112 mod p(x), x^258048 mod p(x) +DATA ·KoopConst+48(SB)/8, $0x00000001ca76a040 +DATA ·KoopConst+56(SB)/8, $0x0000000021324d9a + +// x^257088 mod p(x), x^257024 mod p(x) +DATA ·KoopConst+64(SB)/8, $0x00000001e3152efc +DATA ·KoopConst+72(SB)/8, $0x00000000d20972ce + +// x^256064 mod p(x), x^256000 mod p(x) +DATA ·KoopConst+80(SB)/8, $0x00000001b0349792 +DATA ·KoopConst+88(SB)/8, $0x000000003475ea06 + +// x^255040 mod p(x), x^254976 mod p(x) +DATA ·KoopConst+96(SB)/8, $0x0000000120a60fe0 +DATA ·KoopConst+104(SB)/8, $0x00000001e40e36c4 + +// x^254016 mod p(x), x^253952 mod p(x) +DATA ·KoopConst+112(SB)/8, $0x00000000b3c4b082 +DATA ·KoopConst+120(SB)/8, $0x00000000b2490102 + +// x^252992 mod p(x), x^252928 mod p(x) +DATA ·KoopConst+128(SB)/8, $0x000000017fe9f3d2 +DATA ·KoopConst+136(SB)/8, $0x000000016b9e1332 + +// x^251968 mod p(x), x^251904 mod p(x) +DATA ·KoopConst+144(SB)/8, $0x0000000145703cbe +DATA ·KoopConst+152(SB)/8, $0x00000001d6c378f4 + +// x^250944 mod p(x), x^250880 mod p(x) +DATA ·KoopConst+160(SB)/8, $0x0000000107551c9c +DATA ·KoopConst+168(SB)/8, $0x0000000085796eac + +// x^249920 mod p(x), x^249856 mod p(x) 
+DATA ·KoopConst+176(SB)/8, $0x000000003865a702 +DATA ·KoopConst+184(SB)/8, $0x000000019d2f3aaa + +// x^248896 mod p(x), x^248832 mod p(x) +DATA ·KoopConst+192(SB)/8, $0x000000005504f9b8 +DATA ·KoopConst+200(SB)/8, $0x00000001554ddbd4 + +// x^247872 mod p(x), x^247808 mod p(x) +DATA ·KoopConst+208(SB)/8, $0x00000000239bcdd4 +DATA ·KoopConst+216(SB)/8, $0x00000000a76376b0 + +// x^246848 mod p(x), x^246784 mod p(x) +DATA ·KoopConst+224(SB)/8, $0x00000000caead774 +DATA ·KoopConst+232(SB)/8, $0x0000000139b7283c + +// x^245824 mod p(x), x^245760 mod p(x) +DATA ·KoopConst+240(SB)/8, $0x0000000022a3fa16 +DATA ·KoopConst+248(SB)/8, $0x0000000111087030 + +// x^244800 mod p(x), x^244736 mod p(x) +DATA ·KoopConst+256(SB)/8, $0x000000011f89160e +DATA ·KoopConst+264(SB)/8, $0x00000000ad786dc2 + +// x^243776 mod p(x), x^243712 mod p(x) +DATA ·KoopConst+272(SB)/8, $0x00000001a976c248 +DATA ·KoopConst+280(SB)/8, $0x00000000b7a1d068 + +// x^242752 mod p(x), x^242688 mod p(x) +DATA ·KoopConst+288(SB)/8, $0x00000000c20d09c8 +DATA ·KoopConst+296(SB)/8, $0x000000009c5c591c + +// x^241728 mod p(x), x^241664 mod p(x) +DATA ·KoopConst+304(SB)/8, $0x000000016264fe38 +DATA ·KoopConst+312(SB)/8, $0x000000016482aa1a + +// x^240704 mod p(x), x^240640 mod p(x) +DATA ·KoopConst+320(SB)/8, $0x00000001b57aee6a +DATA ·KoopConst+328(SB)/8, $0x000000009a409ba8 + +// x^239680 mod p(x), x^239616 mod p(x) +DATA ·KoopConst+336(SB)/8, $0x00000000e8f1be0a +DATA ·KoopConst+344(SB)/8, $0x00000001ad8eaed8 + +// x^238656 mod p(x), x^238592 mod p(x) +DATA ·KoopConst+352(SB)/8, $0x0000000053fcd0fc +DATA ·KoopConst+360(SB)/8, $0x000000017558b57a + +// x^237632 mod p(x), x^237568 mod p(x) +DATA ·KoopConst+368(SB)/8, $0x000000012df9d496 +DATA ·KoopConst+376(SB)/8, $0x00000000cbb749c8 + +// x^236608 mod p(x), x^236544 mod p(x) +DATA ·KoopConst+384(SB)/8, $0x000000004cb0db26 +DATA ·KoopConst+392(SB)/8, $0x000000008524fc5a + +// x^235584 mod p(x), x^235520 mod p(x) +DATA ·KoopConst+400(SB)/8, $0x00000001150c4584 +DATA 
·KoopConst+408(SB)/8, $0x0000000028ce6b76 + +// x^234560 mod p(x), x^234496 mod p(x) +DATA ·KoopConst+416(SB)/8, $0x0000000104f52056 +DATA ·KoopConst+424(SB)/8, $0x00000000e0c48bdc + +// x^233536 mod p(x), x^233472 mod p(x) +DATA ·KoopConst+432(SB)/8, $0x000000008ea11ac8 +DATA ·KoopConst+440(SB)/8, $0x000000003dd3bf9a + +// x^232512 mod p(x), x^232448 mod p(x) +DATA ·KoopConst+448(SB)/8, $0x00000001cc0a3942 +DATA ·KoopConst+456(SB)/8, $0x00000000cb71066c + +// x^231488 mod p(x), x^231424 mod p(x) +DATA ·KoopConst+464(SB)/8, $0x00000000d26231e6 +DATA ·KoopConst+472(SB)/8, $0x00000001d4ee1540 + +// x^230464 mod p(x), x^230400 mod p(x) +DATA ·KoopConst+480(SB)/8, $0x00000000c70d5730 +DATA ·KoopConst+488(SB)/8, $0x00000001d82bed0a + +// x^229440 mod p(x), x^229376 mod p(x) +DATA ·KoopConst+496(SB)/8, $0x00000000e215dfc4 +DATA ·KoopConst+504(SB)/8, $0x000000016e0c7d86 + +// x^228416 mod p(x), x^228352 mod p(x) +DATA ·KoopConst+512(SB)/8, $0x000000013870d0dc +DATA ·KoopConst+520(SB)/8, $0x00000001437051b0 + +// x^227392 mod p(x), x^227328 mod p(x) +DATA ·KoopConst+528(SB)/8, $0x0000000153e4cf3c +DATA ·KoopConst+536(SB)/8, $0x00000000f9a8d4be + +// x^226368 mod p(x), x^226304 mod p(x) +DATA ·KoopConst+544(SB)/8, $0x0000000125f6fdf0 +DATA ·KoopConst+552(SB)/8, $0x000000016b09be1c + +// x^225344 mod p(x), x^225280 mod p(x) +DATA ·KoopConst+560(SB)/8, $0x0000000157ba3a82 +DATA ·KoopConst+568(SB)/8, $0x0000000105f50ed6 + +// x^224320 mod p(x), x^224256 mod p(x) +DATA ·KoopConst+576(SB)/8, $0x00000001cf711064 +DATA ·KoopConst+584(SB)/8, $0x00000001ca7fe3cc + +// x^223296 mod p(x), x^223232 mod p(x) +DATA ·KoopConst+592(SB)/8, $0x00000001006353d2 +DATA ·KoopConst+600(SB)/8, $0x0000000192372e78 + +// x^222272 mod p(x), x^222208 mod p(x) +DATA ·KoopConst+608(SB)/8, $0x000000010cd9faec +DATA ·KoopConst+616(SB)/8, $0x000000008a47af7e + +// x^221248 mod p(x), x^221184 mod p(x) +DATA ·KoopConst+624(SB)/8, $0x000000012148b190 +DATA ·KoopConst+632(SB)/8, $0x00000000a67473e8 + +// 
x^220224 mod p(x), x^220160 mod p(x) +DATA ·KoopConst+640(SB)/8, $0x00000000776473d6 +DATA ·KoopConst+648(SB)/8, $0x000000013689f2fa + +// x^219200 mod p(x), x^219136 mod p(x) +DATA ·KoopConst+656(SB)/8, $0x00000001ce765bd6 +DATA ·KoopConst+664(SB)/8, $0x00000000e7231774 + +// x^218176 mod p(x), x^218112 mod p(x) +DATA ·KoopConst+672(SB)/8, $0x00000000b29165e8 +DATA ·KoopConst+680(SB)/8, $0x0000000011b5ae68 + +// x^217152 mod p(x), x^217088 mod p(x) +DATA ·KoopConst+688(SB)/8, $0x0000000084ff5a68 +DATA ·KoopConst+696(SB)/8, $0x000000004fd5c188 + +// x^216128 mod p(x), x^216064 mod p(x) +DATA ·KoopConst+704(SB)/8, $0x00000001921e9076 +DATA ·KoopConst+712(SB)/8, $0x000000012148fa22 + +// x^215104 mod p(x), x^215040 mod p(x) +DATA ·KoopConst+720(SB)/8, $0x000000009a753a3c +DATA ·KoopConst+728(SB)/8, $0x000000010cff4f3e + +// x^214080 mod p(x), x^214016 mod p(x) +DATA ·KoopConst+736(SB)/8, $0x000000000251401e +DATA ·KoopConst+744(SB)/8, $0x00000001f9d991d4 + +// x^213056 mod p(x), x^212992 mod p(x) +DATA ·KoopConst+752(SB)/8, $0x00000001f65541fa +DATA ·KoopConst+760(SB)/8, $0x00000001c31db214 + +// x^212032 mod p(x), x^211968 mod p(x) +DATA ·KoopConst+768(SB)/8, $0x00000001d8c8117a +DATA ·KoopConst+776(SB)/8, $0x00000001849fba4a + +// x^211008 mod p(x), x^210944 mod p(x) +DATA ·KoopConst+784(SB)/8, $0x000000014f7a2200 +DATA ·KoopConst+792(SB)/8, $0x00000001cb603184 + +// x^209984 mod p(x), x^209920 mod p(x) +DATA ·KoopConst+800(SB)/8, $0x000000005154a9f4 +DATA ·KoopConst+808(SB)/8, $0x0000000132db7116 + +// x^208960 mod p(x), x^208896 mod p(x) +DATA ·KoopConst+816(SB)/8, $0x00000001dfc69196 +DATA ·KoopConst+824(SB)/8, $0x0000000010694e22 + +// x^207936 mod p(x), x^207872 mod p(x) +DATA ·KoopConst+832(SB)/8, $0x00000001c29f1aa0 +DATA ·KoopConst+840(SB)/8, $0x0000000103b7b478 + +// x^206912 mod p(x), x^206848 mod p(x) +DATA ·KoopConst+848(SB)/8, $0x000000013785f232 +DATA ·KoopConst+856(SB)/8, $0x000000000ab44030 + +// x^205888 mod p(x), x^205824 mod p(x) +DATA 
·KoopConst+864(SB)/8, $0x000000010133536e +DATA ·KoopConst+872(SB)/8, $0x0000000131385b68 + +// x^204864 mod p(x), x^204800 mod p(x) +DATA ·KoopConst+880(SB)/8, $0x00000001d45421dc +DATA ·KoopConst+888(SB)/8, $0x00000001761dab66 + +// x^203840 mod p(x), x^203776 mod p(x) +DATA ·KoopConst+896(SB)/8, $0x000000000b59cc28 +DATA ·KoopConst+904(SB)/8, $0x000000012cf0a2a6 + +// x^202816 mod p(x), x^202752 mod p(x) +DATA ·KoopConst+912(SB)/8, $0x00000001f2f74aba +DATA ·KoopConst+920(SB)/8, $0x00000001f4ce25a2 + +// x^201792 mod p(x), x^201728 mod p(x) +DATA ·KoopConst+928(SB)/8, $0x00000000fb308e7e +DATA ·KoopConst+936(SB)/8, $0x000000014c2aae20 + +// x^200768 mod p(x), x^200704 mod p(x) +DATA ·KoopConst+944(SB)/8, $0x0000000167583fa6 +DATA ·KoopConst+952(SB)/8, $0x00000001c162a55a + +// x^199744 mod p(x), x^199680 mod p(x) +DATA ·KoopConst+960(SB)/8, $0x000000017ebb13e0 +DATA ·KoopConst+968(SB)/8, $0x0000000185681a40 + +// x^198720 mod p(x), x^198656 mod p(x) +DATA ·KoopConst+976(SB)/8, $0x00000001ca653306 +DATA ·KoopConst+984(SB)/8, $0x00000001f2642b48 + +// x^197696 mod p(x), x^197632 mod p(x) +DATA ·KoopConst+992(SB)/8, $0x0000000093bb6946 +DATA ·KoopConst+1000(SB)/8, $0x00000001d9cb5a78 + +// x^196672 mod p(x), x^196608 mod p(x) +DATA ·KoopConst+1008(SB)/8, $0x00000000cbc1553e +DATA ·KoopConst+1016(SB)/8, $0x000000008059328c + +// x^195648 mod p(x), x^195584 mod p(x) +DATA ·KoopConst+1024(SB)/8, $0x00000001f9a86fec +DATA ·KoopConst+1032(SB)/8, $0x000000009373c360 + +// x^194624 mod p(x), x^194560 mod p(x) +DATA ·KoopConst+1040(SB)/8, $0x0000000005c52d8a +DATA ·KoopConst+1048(SB)/8, $0x00000001a14061d6 + +// x^193600 mod p(x), x^193536 mod p(x) +DATA ·KoopConst+1056(SB)/8, $0x000000010d8dc668 +DATA ·KoopConst+1064(SB)/8, $0x00000000a9864d48 + +// x^192576 mod p(x), x^192512 mod p(x) +DATA ·KoopConst+1072(SB)/8, $0x0000000158571310 +DATA ·KoopConst+1080(SB)/8, $0x000000011df8c040 + +// x^191552 mod p(x), x^191488 mod p(x) +DATA ·KoopConst+1088(SB)/8, $0x0000000166102348 
+DATA ·KoopConst+1096(SB)/8, $0x0000000023a3e6b6 + +// x^190528 mod p(x), x^190464 mod p(x) +DATA ·KoopConst+1104(SB)/8, $0x0000000009513050 +DATA ·KoopConst+1112(SB)/8, $0x00000001207db28a + +// x^189504 mod p(x), x^189440 mod p(x) +DATA ·KoopConst+1120(SB)/8, $0x00000000b0725c74 +DATA ·KoopConst+1128(SB)/8, $0x00000000f94bc632 + +// x^188480 mod p(x), x^188416 mod p(x) +DATA ·KoopConst+1136(SB)/8, $0x000000002985c7e2 +DATA ·KoopConst+1144(SB)/8, $0x00000000ea32cbf6 + +// x^187456 mod p(x), x^187392 mod p(x) +DATA ·KoopConst+1152(SB)/8, $0x00000000a7d4da9e +DATA ·KoopConst+1160(SB)/8, $0x0000000004eb981a + +// x^186432 mod p(x), x^186368 mod p(x) +DATA ·KoopConst+1168(SB)/8, $0x000000000a3f8792 +DATA ·KoopConst+1176(SB)/8, $0x00000000ca8ce712 + +// x^185408 mod p(x), x^185344 mod p(x) +DATA ·KoopConst+1184(SB)/8, $0x00000001ca2c1ce4 +DATA ·KoopConst+1192(SB)/8, $0x0000000065ba801c + +// x^184384 mod p(x), x^184320 mod p(x) +DATA ·KoopConst+1200(SB)/8, $0x00000000e2900196 +DATA ·KoopConst+1208(SB)/8, $0x0000000194aade7a + +// x^183360 mod p(x), x^183296 mod p(x) +DATA ·KoopConst+1216(SB)/8, $0x00000001fbadf0e4 +DATA ·KoopConst+1224(SB)/8, $0x00000001e7939fb2 + +// x^182336 mod p(x), x^182272 mod p(x) +DATA ·KoopConst+1232(SB)/8, $0x00000000d5d96c40 +DATA ·KoopConst+1240(SB)/8, $0x0000000098e5fe22 + +// x^181312 mod p(x), x^181248 mod p(x) +DATA ·KoopConst+1248(SB)/8, $0x000000015c11d3f2 +DATA ·KoopConst+1256(SB)/8, $0x000000016bba0324 + +// x^180288 mod p(x), x^180224 mod p(x) +DATA ·KoopConst+1264(SB)/8, $0x0000000111fb2648 +DATA ·KoopConst+1272(SB)/8, $0x0000000104dce052 + +// x^179264 mod p(x), x^179200 mod p(x) +DATA ·KoopConst+1280(SB)/8, $0x00000001d9f3a564 +DATA ·KoopConst+1288(SB)/8, $0x00000001af31a42e + +// x^178240 mod p(x), x^178176 mod p(x) +DATA ·KoopConst+1296(SB)/8, $0x00000001b556cd1e +DATA ·KoopConst+1304(SB)/8, $0x00000001c56c57ba + +// x^177216 mod p(x), x^177152 mod p(x) +DATA ·KoopConst+1312(SB)/8, $0x0000000101994d2c +DATA 
·KoopConst+1320(SB)/8, $0x00000000f6bb1a2e + +// x^176192 mod p(x), x^176128 mod p(x) +DATA ·KoopConst+1328(SB)/8, $0x00000001e8dbf09c +DATA ·KoopConst+1336(SB)/8, $0x00000001abdbf2b2 + +// x^175168 mod p(x), x^175104 mod p(x) +DATA ·KoopConst+1344(SB)/8, $0x000000015580543a +DATA ·KoopConst+1352(SB)/8, $0x00000001a665a880 + +// x^174144 mod p(x), x^174080 mod p(x) +DATA ·KoopConst+1360(SB)/8, $0x00000000c7074f24 +DATA ·KoopConst+1368(SB)/8, $0x00000000c102c700 + +// x^173120 mod p(x), x^173056 mod p(x) +DATA ·KoopConst+1376(SB)/8, $0x00000000fa4112b0 +DATA ·KoopConst+1384(SB)/8, $0x00000000ee362a50 + +// x^172096 mod p(x), x^172032 mod p(x) +DATA ·KoopConst+1392(SB)/8, $0x00000000e786c13e +DATA ·KoopConst+1400(SB)/8, $0x0000000045f29038 + +// x^171072 mod p(x), x^171008 mod p(x) +DATA ·KoopConst+1408(SB)/8, $0x00000001e45e3694 +DATA ·KoopConst+1416(SB)/8, $0x0000000117b9ab5c + +// x^170048 mod p(x), x^169984 mod p(x) +DATA ·KoopConst+1424(SB)/8, $0x000000005423dd8c +DATA ·KoopConst+1432(SB)/8, $0x00000001115dff5e + +// x^169024 mod p(x), x^168960 mod p(x) +DATA ·KoopConst+1440(SB)/8, $0x00000001a1e67766 +DATA ·KoopConst+1448(SB)/8, $0x0000000117fad29c + +// x^168000 mod p(x), x^167936 mod p(x) +DATA ·KoopConst+1456(SB)/8, $0x0000000041a3f508 +DATA ·KoopConst+1464(SB)/8, $0x000000017de134e6 + +// x^166976 mod p(x), x^166912 mod p(x) +DATA ·KoopConst+1472(SB)/8, $0x000000003e792f7e +DATA ·KoopConst+1480(SB)/8, $0x00000000a2f5d19c + +// x^165952 mod p(x), x^165888 mod p(x) +DATA ·KoopConst+1488(SB)/8, $0x00000000c8948aaa +DATA ·KoopConst+1496(SB)/8, $0x00000000dee13658 + +// x^164928 mod p(x), x^164864 mod p(x) +DATA ·KoopConst+1504(SB)/8, $0x000000005d4ccb36 +DATA ·KoopConst+1512(SB)/8, $0x000000015355440c + +// x^163904 mod p(x), x^163840 mod p(x) +DATA ·KoopConst+1520(SB)/8, $0x00000000e92a78a2 +DATA ·KoopConst+1528(SB)/8, $0x0000000197a21778 + +// x^162880 mod p(x), x^162816 mod p(x) +DATA ·KoopConst+1536(SB)/8, $0x000000016ba67caa +DATA ·KoopConst+1544(SB)/8, 
$0x00000001a3835ec0 + +// x^161856 mod p(x), x^161792 mod p(x) +DATA ·KoopConst+1552(SB)/8, $0x000000004838afc6 +DATA ·KoopConst+1560(SB)/8, $0x0000000011f20912 + +// x^160832 mod p(x), x^160768 mod p(x) +DATA ·KoopConst+1568(SB)/8, $0x000000016644e308 +DATA ·KoopConst+1576(SB)/8, $0x00000001cce9d6cc + +// x^159808 mod p(x), x^159744 mod p(x) +DATA ·KoopConst+1584(SB)/8, $0x0000000037c22f42 +DATA ·KoopConst+1592(SB)/8, $0x0000000084d1e71c + +// x^158784 mod p(x), x^158720 mod p(x) +DATA ·KoopConst+1600(SB)/8, $0x00000001dedba6ca +DATA ·KoopConst+1608(SB)/8, $0x0000000197c2ad54 + +// x^157760 mod p(x), x^157696 mod p(x) +DATA ·KoopConst+1616(SB)/8, $0x0000000146a43500 +DATA ·KoopConst+1624(SB)/8, $0x000000018609261e + +// x^156736 mod p(x), x^156672 mod p(x) +DATA ·KoopConst+1632(SB)/8, $0x000000001cf762de +DATA ·KoopConst+1640(SB)/8, $0x00000000b4b4c224 + +// x^155712 mod p(x), x^155648 mod p(x) +DATA ·KoopConst+1648(SB)/8, $0x0000000022ff7eda +DATA ·KoopConst+1656(SB)/8, $0x0000000080817496 + +// x^154688 mod p(x), x^154624 mod p(x) +DATA ·KoopConst+1664(SB)/8, $0x00000001b6df625e +DATA ·KoopConst+1672(SB)/8, $0x00000001aefb473c + +// x^153664 mod p(x), x^153600 mod p(x) +DATA ·KoopConst+1680(SB)/8, $0x00000001cc99ab58 +DATA ·KoopConst+1688(SB)/8, $0x000000013f1aa474 + +// x^152640 mod p(x), x^152576 mod p(x) +DATA ·KoopConst+1696(SB)/8, $0x00000001c53f5ce2 +DATA ·KoopConst+1704(SB)/8, $0x000000010ca2c756 + +// x^151616 mod p(x), x^151552 mod p(x) +DATA ·KoopConst+1712(SB)/8, $0x0000000082a9c60e +DATA ·KoopConst+1720(SB)/8, $0x000000002c63533a + +// x^150592 mod p(x), x^150528 mod p(x) +DATA ·KoopConst+1728(SB)/8, $0x00000000ec78b570 +DATA ·KoopConst+1736(SB)/8, $0x00000001b7f2ad50 + +// x^149568 mod p(x), x^149504 mod p(x) +DATA ·KoopConst+1744(SB)/8, $0x00000001d3fe1e8e +DATA ·KoopConst+1752(SB)/8, $0x00000000acdf4c20 + +// x^148544 mod p(x), x^148480 mod p(x) +DATA ·KoopConst+1760(SB)/8, $0x000000007f9a7bde +DATA ·KoopConst+1768(SB)/8, $0x000000000bd29e8c + +// 
x^147520 mod p(x), x^147456 mod p(x) +DATA ·KoopConst+1776(SB)/8, $0x00000000e606f518 +DATA ·KoopConst+1784(SB)/8, $0x00000001eef6992e + +// x^146496 mod p(x), x^146432 mod p(x) +DATA ·KoopConst+1792(SB)/8, $0x000000008538cb96 +DATA ·KoopConst+1800(SB)/8, $0x00000000b01644e6 + +// x^145472 mod p(x), x^145408 mod p(x) +DATA ·KoopConst+1808(SB)/8, $0x0000000131d030b2 +DATA ·KoopConst+1816(SB)/8, $0x0000000059c51acc + +// x^144448 mod p(x), x^144384 mod p(x) +DATA ·KoopConst+1824(SB)/8, $0x00000000115a4d0e +DATA ·KoopConst+1832(SB)/8, $0x00000001a2849272 + +// x^143424 mod p(x), x^143360 mod p(x) +DATA ·KoopConst+1840(SB)/8, $0x00000000e8a5356e +DATA ·KoopConst+1848(SB)/8, $0x00000001a4e0b610 + +// x^142400 mod p(x), x^142336 mod p(x) +DATA ·KoopConst+1856(SB)/8, $0x0000000158d988be +DATA ·KoopConst+1864(SB)/8, $0x00000000084e81a6 + +// x^141376 mod p(x), x^141312 mod p(x) +DATA ·KoopConst+1872(SB)/8, $0x00000001240db498 +DATA ·KoopConst+1880(SB)/8, $0x00000001b71f1fd8 + +// x^140352 mod p(x), x^140288 mod p(x) +DATA ·KoopConst+1888(SB)/8, $0x000000009ce87826 +DATA ·KoopConst+1896(SB)/8, $0x000000017f7df380 + +// x^139328 mod p(x), x^139264 mod p(x) +DATA ·KoopConst+1904(SB)/8, $0x0000000021944aae +DATA ·KoopConst+1912(SB)/8, $0x00000001f7f4e190 + +// x^138304 mod p(x), x^138240 mod p(x) +DATA ·KoopConst+1920(SB)/8, $0x00000001cea3d67e +DATA ·KoopConst+1928(SB)/8, $0x0000000150220d86 + +// x^137280 mod p(x), x^137216 mod p(x) +DATA ·KoopConst+1936(SB)/8, $0x000000004434e926 +DATA ·KoopConst+1944(SB)/8, $0x00000001db7d2b2e + +// x^136256 mod p(x), x^136192 mod p(x) +DATA ·KoopConst+1952(SB)/8, $0x0000000011db8cbe +DATA ·KoopConst+1960(SB)/8, $0x00000000b6ba9668 + +// x^135232 mod p(x), x^135168 mod p(x) +DATA ·KoopConst+1968(SB)/8, $0x00000001f6e0b8dc +DATA ·KoopConst+1976(SB)/8, $0x0000000103fdcecc + +// x^134208 mod p(x), x^134144 mod p(x) +DATA ·KoopConst+1984(SB)/8, $0x00000001f163f4a0 +DATA ·KoopConst+1992(SB)/8, $0x0000000079816a22 + +// x^133184 mod p(x), 
x^133120 mod p(x) +DATA ·KoopConst+2000(SB)/8, $0x000000007b6cc60e +DATA ·KoopConst+2008(SB)/8, $0x0000000173483482 + +// x^132160 mod p(x), x^132096 mod p(x) +DATA ·KoopConst+2016(SB)/8, $0x000000000f26c82c +DATA ·KoopConst+2024(SB)/8, $0x00000000643ea4c0 + +// x^131136 mod p(x), x^131072 mod p(x) +DATA ·KoopConst+2032(SB)/8, $0x00000000b0acad80 +DATA ·KoopConst+2040(SB)/8, $0x00000000a64752d2 + +// x^130112 mod p(x), x^130048 mod p(x) +DATA ·KoopConst+2048(SB)/8, $0x000000013687e91c +DATA ·KoopConst+2056(SB)/8, $0x00000000ca98eb3a + +// x^129088 mod p(x), x^129024 mod p(x) +DATA ·KoopConst+2064(SB)/8, $0x000000006bac3a96 +DATA ·KoopConst+2072(SB)/8, $0x00000001ca6ac8f8 + +// x^128064 mod p(x), x^128000 mod p(x) +DATA ·KoopConst+2080(SB)/8, $0x00000001bf197d5c +DATA ·KoopConst+2088(SB)/8, $0x00000001c48e2e68 + +// x^127040 mod p(x), x^126976 mod p(x) +DATA ·KoopConst+2096(SB)/8, $0x00000000256e84f2 +DATA ·KoopConst+2104(SB)/8, $0x0000000070086782 + +// x^126016 mod p(x), x^125952 mod p(x) +DATA ·KoopConst+2112(SB)/8, $0x000000003eff0d16 +DATA ·KoopConst+2120(SB)/8, $0x00000000f763621c + +// x^124992 mod p(x), x^124928 mod p(x) +DATA ·KoopConst+2128(SB)/8, $0x00000001748e9fd2 +DATA ·KoopConst+2136(SB)/8, $0x00000000ba58646a + +// x^123968 mod p(x), x^123904 mod p(x) +DATA ·KoopConst+2144(SB)/8, $0x000000015bb85b42 +DATA ·KoopConst+2152(SB)/8, $0x0000000138e157d8 + +// x^122944 mod p(x), x^122880 mod p(x) +DATA ·KoopConst+2160(SB)/8, $0x0000000164d1a980 +DATA ·KoopConst+2168(SB)/8, $0x00000001bf0a09dc + +// x^121920 mod p(x), x^121856 mod p(x) +DATA ·KoopConst+2176(SB)/8, $0x000000001415c9f0 +DATA ·KoopConst+2184(SB)/8, $0x0000000098faf300 + +// x^120896 mod p(x), x^120832 mod p(x) +DATA ·KoopConst+2192(SB)/8, $0x0000000195ae2f48 +DATA ·KoopConst+2200(SB)/8, $0x00000001f872f2c6 + +// x^119872 mod p(x), x^119808 mod p(x) +DATA ·KoopConst+2208(SB)/8, $0x0000000059d1d81a +DATA ·KoopConst+2216(SB)/8, $0x00000000f92577be + +// x^118848 mod p(x), x^118784 mod p(x) +DATA 
·KoopConst+2224(SB)/8, $0x00000001bf80257a +DATA ·KoopConst+2232(SB)/8, $0x00000001a4d975f4 + +// x^117824 mod p(x), x^117760 mod p(x) +DATA ·KoopConst+2240(SB)/8, $0x000000011e39bfce +DATA ·KoopConst+2248(SB)/8, $0x000000018b74eeca + +// x^116800 mod p(x), x^116736 mod p(x) +DATA ·KoopConst+2256(SB)/8, $0x00000001287a0456 +DATA ·KoopConst+2264(SB)/8, $0x00000000e8980404 + +// x^115776 mod p(x), x^115712 mod p(x) +DATA ·KoopConst+2272(SB)/8, $0x00000000a5eb589c +DATA ·KoopConst+2280(SB)/8, $0x0000000176ef2b74 + +// x^114752 mod p(x), x^114688 mod p(x) +DATA ·KoopConst+2288(SB)/8, $0x000000017d71c452 +DATA ·KoopConst+2296(SB)/8, $0x0000000063c85caa + +// x^113728 mod p(x), x^113664 mod p(x) +DATA ·KoopConst+2304(SB)/8, $0x00000000fa941f08 +DATA ·KoopConst+2312(SB)/8, $0x00000001708012cc + +// x^112704 mod p(x), x^112640 mod p(x) +DATA ·KoopConst+2320(SB)/8, $0x0000000064ea030e +DATA ·KoopConst+2328(SB)/8, $0x00000000474d58f6 + +// x^111680 mod p(x), x^111616 mod p(x) +DATA ·KoopConst+2336(SB)/8, $0x000000019b7cc7ba +DATA ·KoopConst+2344(SB)/8, $0x00000001c76085a6 + +// x^110656 mod p(x), x^110592 mod p(x) +DATA ·KoopConst+2352(SB)/8, $0x00000000225cb7ba +DATA ·KoopConst+2360(SB)/8, $0x000000018fb0681a + +// x^109632 mod p(x), x^109568 mod p(x) +DATA ·KoopConst+2368(SB)/8, $0x000000010ab3e1da +DATA ·KoopConst+2376(SB)/8, $0x00000001fcee1f16 + +// x^108608 mod p(x), x^108544 mod p(x) +DATA ·KoopConst+2384(SB)/8, $0x00000001ce5cc33e +DATA ·KoopConst+2392(SB)/8, $0x00000000cfbffb7c + +// x^107584 mod p(x), x^107520 mod p(x) +DATA ·KoopConst+2400(SB)/8, $0x000000005e980f6e +DATA ·KoopConst+2408(SB)/8, $0x000000017af8ee72 + +// x^106560 mod p(x), x^106496 mod p(x) +DATA ·KoopConst+2416(SB)/8, $0x00000000d3bf3f46 +DATA ·KoopConst+2424(SB)/8, $0x000000001c2ad3e2 + +// x^105536 mod p(x), x^105472 mod p(x) +DATA ·KoopConst+2432(SB)/8, $0x000000018d554ae0 +DATA ·KoopConst+2440(SB)/8, $0x00000000ee05450a + +// x^104512 mod p(x), x^104448 mod p(x) +DATA ·KoopConst+2448(SB)/8, 
$0x000000018e276eb0 +DATA ·KoopConst+2456(SB)/8, $0x000000000f7d5bac + +// x^103488 mod p(x), x^103424 mod p(x) +DATA ·KoopConst+2464(SB)/8, $0x000000001c0319ce +DATA ·KoopConst+2472(SB)/8, $0x00000001cb26e004 + +// x^102464 mod p(x), x^102400 mod p(x) +DATA ·KoopConst+2480(SB)/8, $0x00000001ca0c75ec +DATA ·KoopConst+2488(SB)/8, $0x00000001553314e2 + +// x^101440 mod p(x), x^101376 mod p(x) +DATA ·KoopConst+2496(SB)/8, $0x00000001fb075330 +DATA ·KoopConst+2504(SB)/8, $0x000000005729be2c + +// x^100416 mod p(x), x^100352 mod p(x) +DATA ·KoopConst+2512(SB)/8, $0x00000000677920e4 +DATA ·KoopConst+2520(SB)/8, $0x0000000192c4479c + +// x^99392 mod p(x), x^99328 mod p(x) +DATA ·KoopConst+2528(SB)/8, $0x00000000332247c8 +DATA ·KoopConst+2536(SB)/8, $0x0000000078d842b6 + +// x^98368 mod p(x), x^98304 mod p(x) +DATA ·KoopConst+2544(SB)/8, $0x00000000ef84fc6c +DATA ·KoopConst+2552(SB)/8, $0x0000000145ffa282 + +// x^97344 mod p(x), x^97280 mod p(x) +DATA ·KoopConst+2560(SB)/8, $0x0000000139ba7690 +DATA ·KoopConst+2568(SB)/8, $0x000000019d679bf4 + +// x^96320 mod p(x), x^96256 mod p(x) +DATA ·KoopConst+2576(SB)/8, $0x00000000029ef444 +DATA ·KoopConst+2584(SB)/8, $0x000000019412f7a0 + +// x^95296 mod p(x), x^95232 mod p(x) +DATA ·KoopConst+2592(SB)/8, $0x00000001d872048c +DATA ·KoopConst+2600(SB)/8, $0x00000000b28c5c96 + +// x^94272 mod p(x), x^94208 mod p(x) +DATA ·KoopConst+2608(SB)/8, $0x000000016535d70a +DATA ·KoopConst+2616(SB)/8, $0x00000000554bfd44 + +// x^93248 mod p(x), x^93184 mod p(x) +DATA ·KoopConst+2624(SB)/8, $0x00000000761dd222 +DATA ·KoopConst+2632(SB)/8, $0x00000000ce9cfa48 + +// x^92224 mod p(x), x^92160 mod p(x) +DATA ·KoopConst+2640(SB)/8, $0x00000001509a3a44 +DATA ·KoopConst+2648(SB)/8, $0x00000000a4702ab2 + +// x^91200 mod p(x), x^91136 mod p(x) +DATA ·KoopConst+2656(SB)/8, $0x000000007e7019f2 +DATA ·KoopConst+2664(SB)/8, $0x00000001c967fbee + +// x^90176 mod p(x), x^90112 mod p(x) +DATA ·KoopConst+2672(SB)/8, $0x00000000fb4c56ea +DATA 
·KoopConst+2680(SB)/8, $0x00000000fd514b3e + +// x^89152 mod p(x), x^89088 mod p(x) +DATA ·KoopConst+2688(SB)/8, $0x000000012022e0ee +DATA ·KoopConst+2696(SB)/8, $0x00000001c0b6f95e + +// x^88128 mod p(x), x^88064 mod p(x) +DATA ·KoopConst+2704(SB)/8, $0x0000000004bc6054 +DATA ·KoopConst+2712(SB)/8, $0x0000000180e103ce + +// x^87104 mod p(x), x^87040 mod p(x) +DATA ·KoopConst+2720(SB)/8, $0x000000017a1a0030 +DATA ·KoopConst+2728(SB)/8, $0x00000001a1630916 + +// x^86080 mod p(x), x^86016 mod p(x) +DATA ·KoopConst+2736(SB)/8, $0x00000001c021a864 +DATA ·KoopConst+2744(SB)/8, $0x000000009a727fb2 + +// x^85056 mod p(x), x^84992 mod p(x) +DATA ·KoopConst+2752(SB)/8, $0x000000009c54421e +DATA ·KoopConst+2760(SB)/8, $0x00000000e83b081a + +// x^84032 mod p(x), x^83968 mod p(x) +DATA ·KoopConst+2768(SB)/8, $0x00000001b4e33e6a +DATA ·KoopConst+2776(SB)/8, $0x000000006b1a1f44 + +// x^83008 mod p(x), x^82944 mod p(x) +DATA ·KoopConst+2784(SB)/8, $0x000000015d615af0 +DATA ·KoopConst+2792(SB)/8, $0x00000000cf280394 + +// x^81984 mod p(x), x^81920 mod p(x) +DATA ·KoopConst+2800(SB)/8, $0x00000001914a3ba8 +DATA ·KoopConst+2808(SB)/8, $0x00000001154b8a9a + +// x^80960 mod p(x), x^80896 mod p(x) +DATA ·KoopConst+2816(SB)/8, $0x000000005f72ec44 +DATA ·KoopConst+2824(SB)/8, $0x0000000149ec63e2 + +// x^79936 mod p(x), x^79872 mod p(x) +DATA ·KoopConst+2832(SB)/8, $0x00000000a33746a8 +DATA ·KoopConst+2840(SB)/8, $0x000000018ef902c4 + +// x^78912 mod p(x), x^78848 mod p(x) +DATA ·KoopConst+2848(SB)/8, $0x00000001c91e90d4 +DATA ·KoopConst+2856(SB)/8, $0x0000000069addb88 + +// x^77888 mod p(x), x^77824 mod p(x) +DATA ·KoopConst+2864(SB)/8, $0x00000001052eb05e +DATA ·KoopConst+2872(SB)/8, $0x00000000e90a29ae + +// x^76864 mod p(x), x^76800 mod p(x) +DATA ·KoopConst+2880(SB)/8, $0x000000006a32f754 +DATA ·KoopConst+2888(SB)/8, $0x00000000c53641ae + +// x^75840 mod p(x), x^75776 mod p(x) +DATA ·KoopConst+2896(SB)/8, $0x00000001ecbd6436 +DATA ·KoopConst+2904(SB)/8, $0x00000000a17c3796 + +// 
x^74816 mod p(x), x^74752 mod p(x) +DATA ·KoopConst+2912(SB)/8, $0x000000000fd3f93a +DATA ·KoopConst+2920(SB)/8, $0x000000015307a62c + +// x^73792 mod p(x), x^73728 mod p(x) +DATA ·KoopConst+2928(SB)/8, $0x00000001686a4c24 +DATA ·KoopConst+2936(SB)/8, $0x000000002f94bbda + +// x^72768 mod p(x), x^72704 mod p(x) +DATA ·KoopConst+2944(SB)/8, $0x00000001e40afca0 +DATA ·KoopConst+2952(SB)/8, $0x0000000072c8b5e6 + +// x^71744 mod p(x), x^71680 mod p(x) +DATA ·KoopConst+2960(SB)/8, $0x000000012779a2b8 +DATA ·KoopConst+2968(SB)/8, $0x00000000f09b7424 + +// x^70720 mod p(x), x^70656 mod p(x) +DATA ·KoopConst+2976(SB)/8, $0x00000000dcdaeb9e +DATA ·KoopConst+2984(SB)/8, $0x00000001c57de3da + +// x^69696 mod p(x), x^69632 mod p(x) +DATA ·KoopConst+2992(SB)/8, $0x00000001674f7a2a +DATA ·KoopConst+3000(SB)/8, $0x000000013922b30e + +// x^68672 mod p(x), x^68608 mod p(x) +DATA ·KoopConst+3008(SB)/8, $0x00000000dcb9e846 +DATA ·KoopConst+3016(SB)/8, $0x000000008759a6c2 + +// x^67648 mod p(x), x^67584 mod p(x) +DATA ·KoopConst+3024(SB)/8, $0x00000000ea9a6af6 +DATA ·KoopConst+3032(SB)/8, $0x00000000545ae424 + +// x^66624 mod p(x), x^66560 mod p(x) +DATA ·KoopConst+3040(SB)/8, $0x000000006d1f7a74 +DATA ·KoopConst+3048(SB)/8, $0x00000001e0cbafd2 + +// x^65600 mod p(x), x^65536 mod p(x) +DATA ·KoopConst+3056(SB)/8, $0x000000006add215e +DATA ·KoopConst+3064(SB)/8, $0x0000000018360c04 + +// x^64576 mod p(x), x^64512 mod p(x) +DATA ·KoopConst+3072(SB)/8, $0x000000010a9ee4b0 +DATA ·KoopConst+3080(SB)/8, $0x00000000941dc432 + +// x^63552 mod p(x), x^63488 mod p(x) +DATA ·KoopConst+3088(SB)/8, $0x00000000304c48d2 +DATA ·KoopConst+3096(SB)/8, $0x0000000004d3566e + +// x^62528 mod p(x), x^62464 mod p(x) +DATA ·KoopConst+3104(SB)/8, $0x0000000163d0e672 +DATA ·KoopConst+3112(SB)/8, $0x0000000096aed14e + +// x^61504 mod p(x), x^61440 mod p(x) +DATA ·KoopConst+3120(SB)/8, $0x0000000010049166 +DATA ·KoopConst+3128(SB)/8, $0x0000000087c13618 + +// x^60480 mod p(x), x^60416 mod p(x) +DATA 
·KoopConst+3136(SB)/8, $0x00000001d3913e34 +DATA ·KoopConst+3144(SB)/8, $0x00000001d52f7b0c + +// x^59456 mod p(x), x^59392 mod p(x) +DATA ·KoopConst+3152(SB)/8, $0x00000001e392d54a +DATA ·KoopConst+3160(SB)/8, $0x000000000182058e + +// x^58432 mod p(x), x^58368 mod p(x) +DATA ·KoopConst+3168(SB)/8, $0x0000000173f2704a +DATA ·KoopConst+3176(SB)/8, $0x00000001ed73aa02 + +// x^57408 mod p(x), x^57344 mod p(x) +DATA ·KoopConst+3184(SB)/8, $0x000000019112b480 +DATA ·KoopConst+3192(SB)/8, $0x000000002721a82e + +// x^56384 mod p(x), x^56320 mod p(x) +DATA ·KoopConst+3200(SB)/8, $0x0000000093d295d6 +DATA ·KoopConst+3208(SB)/8, $0x000000012ca83da2 + +// x^55360 mod p(x), x^55296 mod p(x) +DATA ·KoopConst+3216(SB)/8, $0x0000000114e37f44 +DATA ·KoopConst+3224(SB)/8, $0x00000000da358698 + +// x^54336 mod p(x), x^54272 mod p(x) +DATA ·KoopConst+3232(SB)/8, $0x00000000fcfebc86 +DATA ·KoopConst+3240(SB)/8, $0x0000000011fad322 + +// x^53312 mod p(x), x^53248 mod p(x) +DATA ·KoopConst+3248(SB)/8, $0x00000000834c48d6 +DATA ·KoopConst+3256(SB)/8, $0x000000012b25025c + +// x^52288 mod p(x), x^52224 mod p(x) +DATA ·KoopConst+3264(SB)/8, $0x000000017b909372 +DATA ·KoopConst+3272(SB)/8, $0x000000001290cd24 + +// x^51264 mod p(x), x^51200 mod p(x) +DATA ·KoopConst+3280(SB)/8, $0x000000010156b9ac +DATA ·KoopConst+3288(SB)/8, $0x000000016edd0b06 + +// x^50240 mod p(x), x^50176 mod p(x) +DATA ·KoopConst+3296(SB)/8, $0x0000000113a82fa8 +DATA ·KoopConst+3304(SB)/8, $0x00000000c08e222a + +// x^49216 mod p(x), x^49152 mod p(x) +DATA ·KoopConst+3312(SB)/8, $0x0000000182dacb74 +DATA ·KoopConst+3320(SB)/8, $0x00000000cfb4d10e + +// x^48192 mod p(x), x^48128 mod p(x) +DATA ·KoopConst+3328(SB)/8, $0x000000010210dc40 +DATA ·KoopConst+3336(SB)/8, $0x000000013e156ece + +// x^47168 mod p(x), x^47104 mod p(x) +DATA ·KoopConst+3344(SB)/8, $0x000000008ab5ed20 +DATA ·KoopConst+3352(SB)/8, $0x00000000f12d89f8 + +// x^46144 mod p(x), x^46080 mod p(x) +DATA ·KoopConst+3360(SB)/8, $0x00000000810386fa +DATA 
·KoopConst+3368(SB)/8, $0x00000001fce3337c + +// x^45120 mod p(x), x^45056 mod p(x) +DATA ·KoopConst+3376(SB)/8, $0x000000011dce2fe2 +DATA ·KoopConst+3384(SB)/8, $0x00000001c4bf3514 + +// x^44096 mod p(x), x^44032 mod p(x) +DATA ·KoopConst+3392(SB)/8, $0x000000004bb0a390 +DATA ·KoopConst+3400(SB)/8, $0x00000001ae67c492 + +// x^43072 mod p(x), x^43008 mod p(x) +DATA ·KoopConst+3408(SB)/8, $0x00000000028d486a +DATA ·KoopConst+3416(SB)/8, $0x00000000302af704 + +// x^42048 mod p(x), x^41984 mod p(x) +DATA ·KoopConst+3424(SB)/8, $0x000000010e4d63fe +DATA ·KoopConst+3432(SB)/8, $0x00000001e375b250 + +// x^41024 mod p(x), x^40960 mod p(x) +DATA ·KoopConst+3440(SB)/8, $0x000000014fd6f458 +DATA ·KoopConst+3448(SB)/8, $0x00000001678b58c0 + +// x^40000 mod p(x), x^39936 mod p(x) +DATA ·KoopConst+3456(SB)/8, $0x00000000db7a83a2 +DATA ·KoopConst+3464(SB)/8, $0x0000000065103c1e + +// x^38976 mod p(x), x^38912 mod p(x) +DATA ·KoopConst+3472(SB)/8, $0x000000016cf9fa3c +DATA ·KoopConst+3480(SB)/8, $0x000000000ccd28ca + +// x^37952 mod p(x), x^37888 mod p(x) +DATA ·KoopConst+3488(SB)/8, $0x000000016bb33912 +DATA ·KoopConst+3496(SB)/8, $0x0000000059c177d4 + +// x^36928 mod p(x), x^36864 mod p(x) +DATA ·KoopConst+3504(SB)/8, $0x0000000135bda8bc +DATA ·KoopConst+3512(SB)/8, $0x00000001d162f83a + +// x^35904 mod p(x), x^35840 mod p(x) +DATA ·KoopConst+3520(SB)/8, $0x000000004e8c6b76 +DATA ·KoopConst+3528(SB)/8, $0x00000001efc0230c + +// x^34880 mod p(x), x^34816 mod p(x) +DATA ·KoopConst+3536(SB)/8, $0x00000000e17cb750 +DATA ·KoopConst+3544(SB)/8, $0x00000001a2a2e2d2 + +// x^33856 mod p(x), x^33792 mod p(x) +DATA ·KoopConst+3552(SB)/8, $0x000000010e8bb9cc +DATA ·KoopConst+3560(SB)/8, $0x00000001145c9dc2 + +// x^32832 mod p(x), x^32768 mod p(x) +DATA ·KoopConst+3568(SB)/8, $0x00000001859d1cae +DATA ·KoopConst+3576(SB)/8, $0x00000000949e4a48 + +// x^31808 mod p(x), x^31744 mod p(x) +DATA ·KoopConst+3584(SB)/8, $0x0000000167802bbe +DATA ·KoopConst+3592(SB)/8, $0x0000000128beecbc + +// 
x^30784 mod p(x), x^30720 mod p(x) +DATA ·KoopConst+3600(SB)/8, $0x0000000086f5219c +DATA ·KoopConst+3608(SB)/8, $0x00000001ffc96ae4 + +// x^29760 mod p(x), x^29696 mod p(x) +DATA ·KoopConst+3616(SB)/8, $0x00000001349a4faa +DATA ·KoopConst+3624(SB)/8, $0x00000001ba81e0aa + +// x^28736 mod p(x), x^28672 mod p(x) +DATA ·KoopConst+3632(SB)/8, $0x000000007da3353e +DATA ·KoopConst+3640(SB)/8, $0x0000000104d7df14 + +// x^27712 mod p(x), x^27648 mod p(x) +DATA ·KoopConst+3648(SB)/8, $0x00000000440fba4e +DATA ·KoopConst+3656(SB)/8, $0x00000001c2ff8518 + +// x^26688 mod p(x), x^26624 mod p(x) +DATA ·KoopConst+3664(SB)/8, $0x00000000507aba70 +DATA ·KoopConst+3672(SB)/8, $0x00000000ba6d4708 + +// x^25664 mod p(x), x^25600 mod p(x) +DATA ·KoopConst+3680(SB)/8, $0x0000000015b578b6 +DATA ·KoopConst+3688(SB)/8, $0x00000001d49d4bba + +// x^24640 mod p(x), x^24576 mod p(x) +DATA ·KoopConst+3696(SB)/8, $0x0000000141633fb2 +DATA ·KoopConst+3704(SB)/8, $0x00000000d21247e6 + +// x^23616 mod p(x), x^23552 mod p(x) +DATA ·KoopConst+3712(SB)/8, $0x0000000178712680 +DATA ·KoopConst+3720(SB)/8, $0x0000000063b4004a + +// x^22592 mod p(x), x^22528 mod p(x) +DATA ·KoopConst+3728(SB)/8, $0x000000001404c194 +DATA ·KoopConst+3736(SB)/8, $0x0000000094f55d2c + +// x^21568 mod p(x), x^21504 mod p(x) +DATA ·KoopConst+3744(SB)/8, $0x00000000469dbe46 +DATA ·KoopConst+3752(SB)/8, $0x00000001ca68fe74 + +// x^20544 mod p(x), x^20480 mod p(x) +DATA ·KoopConst+3760(SB)/8, $0x00000000fb093fd8 +DATA ·KoopConst+3768(SB)/8, $0x00000001fd7d1b4c + +// x^19520 mod p(x), x^19456 mod p(x) +DATA ·KoopConst+3776(SB)/8, $0x00000000767a2bfe +DATA ·KoopConst+3784(SB)/8, $0x0000000055982d0c + +// x^18496 mod p(x), x^18432 mod p(x) +DATA ·KoopConst+3792(SB)/8, $0x00000001344e22bc +DATA ·KoopConst+3800(SB)/8, $0x00000000221553a6 + +// x^17472 mod p(x), x^17408 mod p(x) +DATA ·KoopConst+3808(SB)/8, $0x0000000161cd9978 +DATA ·KoopConst+3816(SB)/8, $0x000000013d9a153a + +// x^16448 mod p(x), x^16384 mod p(x) +DATA 
·KoopConst+3824(SB)/8, $0x00000001d702e906 +DATA ·KoopConst+3832(SB)/8, $0x00000001cd108b3c + +// x^15424 mod p(x), x^15360 mod p(x) +DATA ·KoopConst+3840(SB)/8, $0x00000001c7db9908 +DATA ·KoopConst+3848(SB)/8, $0x00000001d0af0f4a + +// x^14400 mod p(x), x^14336 mod p(x) +DATA ·KoopConst+3856(SB)/8, $0x00000001665d025c +DATA ·KoopConst+3864(SB)/8, $0x00000001196cf0ec + +// x^13376 mod p(x), x^13312 mod p(x) +DATA ·KoopConst+3872(SB)/8, $0x000000012df97c0e +DATA ·KoopConst+3880(SB)/8, $0x00000001c88c9704 + +// x^12352 mod p(x), x^12288 mod p(x) +DATA ·KoopConst+3888(SB)/8, $0x000000006fed84da +DATA ·KoopConst+3896(SB)/8, $0x000000002013d300 + +// x^11328 mod p(x), x^11264 mod p(x) +DATA ·KoopConst+3904(SB)/8, $0x00000000b094146e +DATA ·KoopConst+3912(SB)/8, $0x00000001c458501e + +// x^10304 mod p(x), x^10240 mod p(x) +DATA ·KoopConst+3920(SB)/8, $0x00000001ceb518a6 +DATA ·KoopConst+3928(SB)/8, $0x000000003ce14802 + +// x^9280 mod p(x), x^9216 mod p(x) +DATA ·KoopConst+3936(SB)/8, $0x000000011f16db0a +DATA ·KoopConst+3944(SB)/8, $0x00000000bb72bb98 + +// x^8256 mod p(x), x^8192 mod p(x) +DATA ·KoopConst+3952(SB)/8, $0x00000001d4aa130e +DATA ·KoopConst+3960(SB)/8, $0x00000000fb9aeaba + +// x^7232 mod p(x), x^7168 mod p(x) +DATA ·KoopConst+3968(SB)/8, $0x00000001991f01d2 +DATA ·KoopConst+3976(SB)/8, $0x000000000131f5e6 + +// x^6208 mod p(x), x^6144 mod p(x) +DATA ·KoopConst+3984(SB)/8, $0x000000006bd58b4c +DATA ·KoopConst+3992(SB)/8, $0x0000000089d5799a + +// x^5184 mod p(x), x^5120 mod p(x) +DATA ·KoopConst+4000(SB)/8, $0x000000007272c166 +DATA ·KoopConst+4008(SB)/8, $0x00000000474c43b0 + +// x^4160 mod p(x), x^4096 mod p(x) +DATA ·KoopConst+4016(SB)/8, $0x000000013974e6f8 +DATA ·KoopConst+4024(SB)/8, $0x00000001db991f34 + +// x^3136 mod p(x), x^3072 mod p(x) +DATA ·KoopConst+4032(SB)/8, $0x000000000bd6e03c +DATA ·KoopConst+4040(SB)/8, $0x000000004b1bfd00 + +// x^2112 mod p(x), x^2048 mod p(x) +DATA ·KoopConst+4048(SB)/8, $0x000000005988c652 +DATA 
·KoopConst+4056(SB)/8, $0x000000004036b796 + +// x^1088 mod p(x), x^1024 mod p(x) +DATA ·KoopConst+4064(SB)/8, $0x00000000129ef036 +DATA ·KoopConst+4072(SB)/8, $0x000000000c5ec3d4 + +// x^2048 mod p(x), x^2016 mod p(x), x^1984 mod p(x), x^1952 mod p(x) +DATA ·KoopConst+4080(SB)/8, $0xd6f94847201b5bcb +DATA ·KoopConst+4088(SB)/8, $0x1efc02e79571e892 + +// x^1920 mod p(x), x^1888 mod p(x), x^1856 mod p(x), x^1824 mod p(x) +DATA ·KoopConst+4096(SB)/8, $0xce08adcc294c1393 +DATA ·KoopConst+4104(SB)/8, $0x0b269b5c5ab5f161 + +// x^1792 mod p(x), x^1760 mod p(x), x^1728 mod p(x), x^1696 mod p(x) +DATA ·KoopConst+4112(SB)/8, $0x17315505e4201e72 +DATA ·KoopConst+4120(SB)/8, $0x2e841f4784acf3e9 + +// x^1664 mod p(x), x^1632 mod p(x), x^1600 mod p(x), x^1568 mod p(x) +DATA ·KoopConst+4128(SB)/8, $0x37cfc3a67cc667e3 +DATA ·KoopConst+4136(SB)/8, $0x7020425856bc424b + +// x^1536 mod p(x), x^1504 mod p(x), x^1472 mod p(x), x^1440 mod p(x) +DATA ·KoopConst+4144(SB)/8, $0x8e2fa3369218d2c3 +DATA ·KoopConst+4152(SB)/8, $0xdf81bf923f7c6ef1 + +// x^1408 mod p(x), x^1376 mod p(x), x^1344 mod p(x), x^1312 mod p(x) +DATA ·KoopConst+4160(SB)/8, $0x5ce20d2d39ed1981 +DATA ·KoopConst+4168(SB)/8, $0x9d0898a0af5ddc43 + +// x^1280 mod p(x), x^1248 mod p(x), x^1216 mod p(x), x^1184 mod p(x) +DATA ·KoopConst+4176(SB)/8, $0x6f7f4546ca081e03 +DATA ·KoopConst+4184(SB)/8, $0x4992836903fda047 + +// x^1152 mod p(x), x^1120 mod p(x), x^1088 mod p(x), x^1056 mod p(x) +DATA ·KoopConst+4192(SB)/8, $0xfd4f413b9bf11d68 +DATA ·KoopConst+4200(SB)/8, $0xf4ddf452094f781b + +// x^1024 mod p(x), x^992 mod p(x), x^960 mod p(x), x^928 mod p(x) +DATA ·KoopConst+4208(SB)/8, $0x11d84204062f61ea +DATA ·KoopConst+4216(SB)/8, $0x9487f1e51f3588cf + +// x^896 mod p(x), x^864 mod p(x), x^832 mod p(x), x^800 mod p(x) +DATA ·KoopConst+4224(SB)/8, $0xfaedf111abf58a1f +DATA ·KoopConst+4232(SB)/8, $0x31da2c22b1384ec9 + +// x^768 mod p(x), x^736 mod p(x), x^704 mod p(x), x^672 mod p(x) +DATA ·KoopConst+4240(SB)/8, 
$0x0246b541e8f81b22 +DATA ·KoopConst+4248(SB)/8, $0xc857ede58a42eb47 + +// x^640 mod p(x), x^608 mod p(x), x^576 mod p(x), x^544 mod p(x) +DATA ·KoopConst+4256(SB)/8, $0xd4dbfa9b92b0372e +DATA ·KoopConst+4264(SB)/8, $0xe0354c0b2cd1c09a + +// x^512 mod p(x), x^480 mod p(x), x^448 mod p(x), x^416 mod p(x) +DATA ·KoopConst+4272(SB)/8, $0x5f36c79cfc4417ec +DATA ·KoopConst+4280(SB)/8, $0x4b92cf8d54b8f25b + +// x^384 mod p(x), x^352 mod p(x), x^320 mod p(x), x^288 mod p(x) +DATA ·KoopConst+4288(SB)/8, $0xdad234918345041e +DATA ·KoopConst+4296(SB)/8, $0x4e44c81828229301 + +// x^256 mod p(x), x^224 mod p(x), x^192 mod p(x), x^160 mod p(x) +DATA ·KoopConst+4304(SB)/8, $0x56fd28cc8e02f1d0 +DATA ·KoopConst+4312(SB)/8, $0x3da5e43c8ee9ee84 + +// x^128 mod p(x), x^96 mod p(x), x^64 mod p(x), x^32 mod p(x) +DATA ·KoopConst+4320(SB)/8, $0xa583017cdfcb9f08 +DATA ·KoopConst+4328(SB)/8, $0xeb31d82e0c62ab26 + +GLOBL ·KoopConst(SB), RODATA, $4336 + +// Barrett constant m - (4^32)/n +DATA ·KoopBarConst(SB)/8, $0x0000000017d232cd +DATA ·KoopBarConst+8(SB)/8, $0x0000000000000000 +DATA ·KoopBarConst+16(SB)/8, $0x00000001d663b05d +DATA ·KoopBarConst+24(SB)/8, $0x0000000000000000 +GLOBL ·KoopBarConst(SB), RODATA, $32 diff --git a/vendor/github.com/klauspost/crc32/gen.go b/vendor/github.com/klauspost/crc32/gen.go new file mode 100644 index 00000000000..fb3040a7dca --- /dev/null +++ b/vendor/github.com/klauspost/crc32/gen.go @@ -0,0 +1,7 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:generate go run gen_const_ppc64le.go + +package crc32 diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.c b/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.c index 78b6f5b8a8a..387a5e96559 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.c +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.c @@ -1,7 +1,7 @@ #ifndef USE_LIBSQLITE3 /****************************************************************************** ** This file is an amalgamation of many separate C source files from SQLite -** version 3.50.4. By combining all the individual C code files into this +** version 3.51.1. By combining all the individual C code files into this ** single large file, the entire code can be compiled as a single translation ** unit. This allows many compilers to do optimizations that would not be ** possible if the files were compiled separately. Performance improvements @@ -19,7 +19,7 @@ ** separate file. This file contains only code for the core SQLite library. ** ** The content in this amalgamation comes from Fossil check-in -** 4d8adfb30e03f9cf27f800a2c1ba3c48fb4c with changes in files: +** 281fc0e9afc38674b9b0991943b9e9d1e64c with changes in files: ** ** */ @@ -171,7 +171,9 @@ #define HAVE_UTIME 1 #else /* This is not VxWorks. */ -#define OS_VXWORKS 0 +#ifndef OS_VXWORKS +# define OS_VXWORKS 0 +#endif #define HAVE_FCHOWN 1 #define HAVE_READLINK 1 #define HAVE_LSTAT 1 @@ -466,9 +468,12 @@ extern "C" { ** [sqlite3_libversion_number()], [sqlite3_sourceid()], ** [sqlite_version()] and [sqlite_source_id()]. 
*/ -#define SQLITE_VERSION "3.50.4" -#define SQLITE_VERSION_NUMBER 3050004 -#define SQLITE_SOURCE_ID "2025-07-30 19:33:53 4d8adfb30e03f9cf27f800a2c1ba3c48fb4ca1b08b0f5ed59a4d5ecbf45e20a3" +#define SQLITE_VERSION "3.51.1" +#define SQLITE_VERSION_NUMBER 3051001 +#define SQLITE_SOURCE_ID "2025-11-28 17:28:25 281fc0e9afc38674b9b0991943b9e9d1e64c6cbdb133d35f6f5c87ff6af38a88" +#define SQLITE_SCM_BRANCH "branch-3.51" +#define SQLITE_SCM_TAGS "release version-3.51.1" +#define SQLITE_SCM_DATETIME "2025-11-28T17:28:25.933Z" /* ** CAPI3REF: Run-Time Library Version Numbers @@ -488,9 +493,9 @@ extern "C" { ** assert( strcmp(sqlite3_libversion(),SQLITE_VERSION)==0 ); ** )^ ** -** ^The sqlite3_version[] string constant contains the text of [SQLITE_VERSION] -** macro. ^The sqlite3_libversion() function returns a pointer to the -** to the sqlite3_version[] string constant. The sqlite3_libversion() +** ^The sqlite3_version[] string constant contains the text of the +** [SQLITE_VERSION] macro. ^The sqlite3_libversion() function returns a +** pointer to the sqlite3_version[] string constant. The sqlite3_libversion() ** function is provided for use in DLLs since DLL users usually do not have ** direct access to string constants within the DLL. ^The ** sqlite3_libversion_number() function returns an integer equal to @@ -690,7 +695,7 @@ typedef int (*sqlite3_callback)(void*,int,char**, char**); ** without having to use a lot of C code. ** ** ^The sqlite3_exec() interface runs zero or more UTF-8 encoded, -** semicolon-separate SQL statements passed into its 2nd argument, +** semicolon-separated SQL statements passed into its 2nd argument, ** in the context of the [database connection] passed in as its 1st ** argument. 
^If the callback function of the 3rd argument to ** sqlite3_exec() is not NULL, then it is invoked for each result row @@ -723,7 +728,7 @@ typedef int (*sqlite3_callback)(void*,int,char**, char**); ** result row is NULL then the corresponding string pointer for the ** sqlite3_exec() callback is a NULL pointer. ^The 4th argument to the ** sqlite3_exec() callback is an array of pointers to strings where each -** entry represents the name of corresponding result column as obtained +** entry represents the name of a corresponding result column as obtained ** from [sqlite3_column_name()]. ** ** ^If the 2nd parameter to sqlite3_exec() is a NULL pointer, a pointer @@ -817,6 +822,9 @@ SQLITE_API int sqlite3_exec( #define SQLITE_ERROR_MISSING_COLLSEQ (SQLITE_ERROR | (1<<8)) #define SQLITE_ERROR_RETRY (SQLITE_ERROR | (2<<8)) #define SQLITE_ERROR_SNAPSHOT (SQLITE_ERROR | (3<<8)) +#define SQLITE_ERROR_RESERVESIZE (SQLITE_ERROR | (4<<8)) +#define SQLITE_ERROR_KEY (SQLITE_ERROR | (5<<8)) +#define SQLITE_ERROR_UNABLE (SQLITE_ERROR | (6<<8)) #define SQLITE_IOERR_READ (SQLITE_IOERR | (1<<8)) #define SQLITE_IOERR_SHORT_READ (SQLITE_IOERR | (2<<8)) #define SQLITE_IOERR_WRITE (SQLITE_IOERR | (3<<8)) @@ -851,6 +859,8 @@ SQLITE_API int sqlite3_exec( #define SQLITE_IOERR_DATA (SQLITE_IOERR | (32<<8)) #define SQLITE_IOERR_CORRUPTFS (SQLITE_IOERR | (33<<8)) #define SQLITE_IOERR_IN_PAGE (SQLITE_IOERR | (34<<8)) +#define SQLITE_IOERR_BADKEY (SQLITE_IOERR | (35<<8)) +#define SQLITE_IOERR_CODEC (SQLITE_IOERR | (36<<8)) #define SQLITE_LOCKED_SHAREDCACHE (SQLITE_LOCKED | (1<<8)) #define SQLITE_LOCKED_VTAB (SQLITE_LOCKED | (2<<8)) #define SQLITE_BUSY_RECOVERY (SQLITE_BUSY | (1<<8)) @@ -909,7 +919,7 @@ SQLITE_API int sqlite3_exec( ** Note in particular that passing the SQLITE_OPEN_EXCLUSIVE flag into ** [sqlite3_open_v2()] does *not* cause the underlying database file ** to be opened using O_EXCL. 
Passing SQLITE_OPEN_EXCLUSIVE into -** [sqlite3_open_v2()] has historically be a no-op and might become an +** [sqlite3_open_v2()] has historically been a no-op and might become an ** error in future versions of SQLite. */ #define SQLITE_OPEN_READONLY 0x00000001 /* Ok for sqlite3_open_v2() */ @@ -1003,7 +1013,7 @@ SQLITE_API int sqlite3_exec( ** SQLite uses one of these integer values as the second ** argument to calls it makes to the xLock() and xUnlock() methods ** of an [sqlite3_io_methods] object. These values are ordered from -** lest restrictive to most restrictive. +** least restrictive to most restrictive. ** ** The argument to xLock() is always SHARED or higher. The argument to ** xUnlock is either SHARED or NONE. @@ -1244,7 +1254,7 @@ struct sqlite3_io_methods { ** connection. See also [SQLITE_FCNTL_FILE_POINTER]. ** **
  • [[SQLITE_FCNTL_SYNC_OMITTED]] -** No longer in use. +** The SQLITE_FCNTL_SYNC_OMITTED file-control is no longer used. ** **
  • [[SQLITE_FCNTL_SYNC]] ** The [SQLITE_FCNTL_SYNC] opcode is generated internally by SQLite and @@ -1319,7 +1329,7 @@ struct sqlite3_io_methods { ** **
  • [[SQLITE_FCNTL_VFSNAME]] ** ^The [SQLITE_FCNTL_VFSNAME] opcode can be used to obtain the names of -** all [VFSes] in the VFS stack. The names are of all VFS shims and the +** all [VFSes] in the VFS stack. The names of all VFS shims and the ** final bottom-level VFS are written into memory obtained from ** [sqlite3_malloc()] and the result is stored in the char* variable ** that the fourth parameter of [sqlite3_file_control()] points to. @@ -1333,7 +1343,7 @@ struct sqlite3_io_methods { ** ^The [SQLITE_FCNTL_VFS_POINTER] opcode finds a pointer to the top-level ** [VFSes] currently in use. ^(The argument X in ** sqlite3_file_control(db,SQLITE_FCNTL_VFS_POINTER,X) must be -** of type "[sqlite3_vfs] **". This opcodes will set *X +** of type "[sqlite3_vfs] **". This opcode will set *X ** to a pointer to the top-level VFS.)^ ** ^When there are multiple VFS shims in the stack, this opcode finds the ** upper-most shim only. @@ -1523,7 +1533,7 @@ struct sqlite3_io_methods { **
  • [[SQLITE_FCNTL_EXTERNAL_READER]] ** The EXPERIMENTAL [SQLITE_FCNTL_EXTERNAL_READER] opcode is used to detect ** whether or not there is a database client in another process with a wal-mode -** transaction open on the database or not. It is only available on unix.The +** transaction open on the database or not. It is only available on unix. The ** (void*) argument passed with this file-control should be a pointer to a ** value of type (int). The integer value is set to 1 if the database is a wal ** mode database and there exists at least one client in another process that @@ -1541,6 +1551,15 @@ struct sqlite3_io_methods { ** database is not a temp db, then the [SQLITE_FCNTL_RESET_CACHE] file-control ** purges the contents of the in-memory page cache. If there is an open ** transaction, or if the db is a temp-db, this opcode is a no-op, not an error. +** +**
  • [[SQLITE_FCNTL_FILESTAT]] +** The [SQLITE_FCNTL_FILESTAT] opcode returns low-level diagnostic information +** about the [sqlite3_file] objects used access the database and journal files +** for the given schema. The fourth parameter to [sqlite3_file_control()] +** should be an initialized [sqlite3_str] pointer. JSON text describing +** various aspects of the sqlite3_file object is appended to the sqlite3_str. +** The SQLITE_FCNTL_FILESTAT opcode is usually a no-op, unless compile-time +** options are used to enable it. ** */ #define SQLITE_FCNTL_LOCKSTATE 1 @@ -1586,6 +1605,7 @@ struct sqlite3_io_methods { #define SQLITE_FCNTL_RESET_CACHE 42 #define SQLITE_FCNTL_NULL_IO 43 #define SQLITE_FCNTL_BLOCK_ON_CONNECT 44 +#define SQLITE_FCNTL_FILESTAT 45 /* deprecated names */ #define SQLITE_GET_LOCKPROXYFILE SQLITE_FCNTL_GET_LOCKPROXYFILE @@ -1948,7 +1968,7 @@ struct sqlite3_vfs { ** SQLite interfaces so that an application usually does not need to ** invoke sqlite3_initialize() directly. For example, [sqlite3_open()] ** calls sqlite3_initialize() so the SQLite library will be automatically -** initialized when [sqlite3_open()] is called if it has not be initialized +** initialized when [sqlite3_open()] is called if it has not been initialized ** already. ^However, if SQLite is compiled with the [SQLITE_OMIT_AUTOINIT] ** compile-time option, then the automatic calls to sqlite3_initialize() ** are omitted and the application must call sqlite3_initialize() directly @@ -2205,21 +2225,21 @@ struct sqlite3_mem_methods { ** The [sqlite3_mem_methods] ** structure is filled with the currently defined memory allocation routines.)^ ** This option can be used to overload the default memory allocation -** routines with a wrapper that simulations memory allocation failure or +** routines with a wrapper that simulates memory allocation failure or ** tracks memory usage, for example. ** ** [[SQLITE_CONFIG_SMALL_MALLOC]]
    SQLITE_CONFIG_SMALL_MALLOC
    -**
    ^The SQLITE_CONFIG_SMALL_MALLOC option takes single argument of +**
    ^The SQLITE_CONFIG_SMALL_MALLOC option takes a single argument of ** type int, interpreted as a boolean, which if true provides a hint to ** SQLite that it should avoid large memory allocations if possible. ** SQLite will run faster if it is free to make large memory allocations, -** but some application might prefer to run slower in exchange for +** but some applications might prefer to run slower in exchange for ** guarantees about memory fragmentation that are possible if large ** allocations are avoided. This hint is normally off. **
    ** ** [[SQLITE_CONFIG_MEMSTATUS]]
    SQLITE_CONFIG_MEMSTATUS
    -**
    ^The SQLITE_CONFIG_MEMSTATUS option takes single argument of type int, +**
    ^The SQLITE_CONFIG_MEMSTATUS option takes a single argument of type int, ** interpreted as a boolean, which enables or disables the collection of ** memory allocation statistics. ^(When memory allocation statistics are ** disabled, the following SQLite interfaces become non-operational: @@ -2264,7 +2284,7 @@ struct sqlite3_mem_methods { ** ^If pMem is NULL and N is non-zero, then each database connection ** does an initial bulk allocation for page cache memory ** from [sqlite3_malloc()] sufficient for N cache lines if N is positive or -** of -1024*N bytes if N is negative, . ^If additional +** of -1024*N bytes if N is negative. ^If additional ** page cache memory is needed beyond what is provided by the initial ** allocation, then SQLite goes to [sqlite3_malloc()] separately for each ** additional cache line.
    @@ -2293,7 +2313,7 @@ struct sqlite3_mem_methods { **
    ^(The SQLITE_CONFIG_MUTEX option takes a single argument which is a ** pointer to an instance of the [sqlite3_mutex_methods] structure. ** The argument specifies alternative low-level mutex routines to be used -** in place the mutex routines built into SQLite.)^ ^SQLite makes a copy of +** in place of the mutex routines built into SQLite.)^ ^SQLite makes a copy of ** the content of the [sqlite3_mutex_methods] structure before the call to ** [sqlite3_config()] returns. ^If SQLite is compiled with ** the [SQLITE_THREADSAFE | SQLITE_THREADSAFE=0] compile-time option then @@ -2335,7 +2355,7 @@ struct sqlite3_mem_methods { ** ** [[SQLITE_CONFIG_GETPCACHE2]]
    SQLITE_CONFIG_GETPCACHE2
    **
    ^(The SQLITE_CONFIG_GETPCACHE2 option takes a single argument which -** is a pointer to an [sqlite3_pcache_methods2] object. SQLite copies of +** is a pointer to an [sqlite3_pcache_methods2] object. SQLite copies off ** the current page cache implementation into that object.)^
    ** ** [[SQLITE_CONFIG_LOG]]
    SQLITE_CONFIG_LOG
    @@ -2352,7 +2372,7 @@ struct sqlite3_mem_methods { ** the logger function is a copy of the first parameter to the corresponding ** [sqlite3_log()] call and is intended to be a [result code] or an ** [extended result code]. ^The third parameter passed to the logger is -** log message after formatting via [sqlite3_snprintf()]. +** a log message after formatting via [sqlite3_snprintf()]. ** The SQLite logging interface is not reentrant; the logger function ** supplied by the application must not invoke any SQLite interface. ** In a multi-threaded application, the application-defined logger @@ -2543,7 +2563,7 @@ struct sqlite3_mem_methods { ** These constants are the available integer configuration options that ** can be passed as the second parameter to the [sqlite3_db_config()] interface. ** -** The [sqlite3_db_config()] interface is a var-args functions. It takes a +** The [sqlite3_db_config()] interface is a var-args function. It takes a ** variable number of parameters, though always at least two. The number of ** parameters passed into sqlite3_db_config() depends on which of these ** constants is given as the second parameter. This documentation page @@ -2655,17 +2675,20 @@ struct sqlite3_mem_methods { ** ** [[SQLITE_DBCONFIG_ENABLE_FTS3_TOKENIZER]] **
    SQLITE_DBCONFIG_ENABLE_FTS3_TOKENIZER
    -**
    ^This option is used to enable or disable the -** [fts3_tokenizer()] function which is part of the -** [FTS3] full-text search engine extension. -** There must be two additional arguments. -** The first argument is an integer which is 0 to disable fts3_tokenizer() or -** positive to enable fts3_tokenizer() or negative to leave the setting -** unchanged. -** The second parameter is a pointer to an integer into which -** is written 0 or 1 to indicate whether fts3_tokenizer is disabled or enabled -** following this call. The second parameter may be a NULL pointer, in -** which case the new setting is not reported back.
    +**
    ^This option is used to enable or disable using the +** [fts3_tokenizer()] function - part of the [FTS3] full-text search engine +** extension - without using bound parameters as the parameters. Doing so +** is disabled by default. There must be two additional arguments. The first +** argument is an integer. If it is passed 0, then using fts3_tokenizer() +** without bound parameters is disabled. If it is passed a positive value, +** then calling fts3_tokenizer without bound parameters is enabled. If it +** is passed a negative value, this setting is not modified - this can be +** used to query for the current setting. The second parameter is a pointer +** to an integer into which is written 0 or 1 to indicate the current value +** of this setting (after it is modified, if applicable). The second +** parameter may be a NULL pointer, in which case the value of the setting +** is not reported back. Refer to [FTS3] documentation for further details. +**
    ** ** [[SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION]] **
    SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION
    @@ -2677,8 +2700,8 @@ struct sqlite3_mem_methods { ** When the first argument to this interface is 1, then only the C-API is ** enabled and the SQL function remains disabled. If the first argument to ** this interface is 0, then both the C-API and the SQL function are disabled. -** If the first argument is -1, then no changes are made to state of either the -** C-API or the SQL function. +** If the first argument is -1, then no changes are made to the state of either +** the C-API or the SQL function. ** The second parameter is a pointer to an integer into which ** is written 0 or 1 to indicate whether [sqlite3_load_extension()] interface ** is disabled or enabled following this call. The second parameter may @@ -2796,7 +2819,7 @@ struct sqlite3_mem_methods { ** [[SQLITE_DBCONFIG_LEGACY_ALTER_TABLE]] **
    SQLITE_DBCONFIG_LEGACY_ALTER_TABLE
    **
    The SQLITE_DBCONFIG_LEGACY_ALTER_TABLE option activates or deactivates -** the legacy behavior of the [ALTER TABLE RENAME] command such it +** the legacy behavior of the [ALTER TABLE RENAME] command such that it ** behaves as it did prior to [version 3.24.0] (2018-06-04). See the ** "Compatibility Notice" on the [ALTER TABLE RENAME documentation] for ** additional information. This feature can also be turned on and off @@ -2845,7 +2868,7 @@ struct sqlite3_mem_methods { **
    SQLITE_DBCONFIG_LEGACY_FILE_FORMAT
    **
    The SQLITE_DBCONFIG_LEGACY_FILE_FORMAT option activates or deactivates ** the legacy file format flag. When activated, this flag causes all newly -** created database file to have a schema format version number (the 4-byte +** created database files to have a schema format version number (the 4-byte ** integer found at offset 44 into the database header) of 1. This in turn ** means that the resulting database file will be readable and writable by ** any SQLite version back to 3.0.0 ([dateof:3.0.0]). Without this setting, @@ -2872,7 +2895,7 @@ struct sqlite3_mem_methods { ** the database handle both when the SQL statement is prepared and when it ** is stepped. The flag is set (collection of statistics is enabled) ** by default.

    This option takes two arguments: an integer and a pointer to -** an integer.. The first argument is 1, 0, or -1 to enable, disable, or +** an integer. The first argument is 1, 0, or -1 to enable, disable, or ** leave unchanged the statement scanstatus option. If the second argument ** is not NULL, then the value of the statement scanstatus setting after ** processing the first argument is written into the integer that the second @@ -2915,8 +2938,8 @@ struct sqlite3_mem_methods { **

    The SQLITE_DBCONFIG_ENABLE_ATTACH_WRITE option enables or disables the ** ability of the [ATTACH DATABASE] SQL command to open a database for writing. ** This capability is enabled by default. Applications can disable or -** reenable this capability using the current DBCONFIG option. If the -** the this capability is disabled, the [ATTACH] command will still work, +** reenable this capability using the current DBCONFIG option. If +** this capability is disabled, the [ATTACH] command will still work, ** but the database will be opened read-only. If this option is disabled, ** then the ability to create a new database using [ATTACH] is also disabled, ** regardless of the value of the [SQLITE_DBCONFIG_ENABLE_ATTACH_CREATE] @@ -2950,7 +2973,7 @@ struct sqlite3_mem_methods { ** **

    Most of the SQLITE_DBCONFIG options take two arguments, so that the ** overall call to [sqlite3_db_config()] has a total of four parameters. -** The first argument (the third parameter to sqlite3_db_config()) is a integer. +** The first argument (the third parameter to sqlite3_db_config()) is an integer. ** The second argument is a pointer to an integer. If the first argument is 1, ** then the option becomes enabled. If the first integer argument is 0, then the ** option is disabled. If the first argument is -1, then the option setting @@ -3240,7 +3263,7 @@ SQLITE_API int sqlite3_is_interrupted(sqlite3*); ** ^These routines return 0 if the statement is incomplete. ^If a ** memory allocation fails, then SQLITE_NOMEM is returned. ** -** ^These routines do not parse the SQL statements thus +** ^These routines do not parse the SQL statements and thus ** will not detect syntactically incorrect SQL. ** ** ^(If SQLite has not been initialized using [sqlite3_initialize()] prior @@ -3357,7 +3380,7 @@ SQLITE_API int sqlite3_busy_timeout(sqlite3*, int ms); ** indefinitely if possible. The results of passing any other negative value ** are undefined. ** -** Internally, each SQLite database handle store two timeout values - the +** Internally, each SQLite database handle stores two timeout values - the ** busy-timeout (used for rollback mode databases, or if the VFS does not ** support blocking locks) and the setlk-timeout (used for blocking locks ** on wal-mode databases). The sqlite3_busy_timeout() method sets both @@ -3387,7 +3410,7 @@ SQLITE_API int sqlite3_setlk_timeout(sqlite3*, int ms, int flags); ** This is a legacy interface that is preserved for backwards compatibility. ** Use of this interface is not recommended. ** -** Definition: A result table is memory data structure created by the +** Definition: A result table is a memory data structure created by the ** [sqlite3_get_table()] interface. 
A result table records the ** complete query results from one or more queries. ** @@ -3530,7 +3553,7 @@ SQLITE_API char *sqlite3_vsnprintf(int,char*,const char*, va_list); ** ^Calling sqlite3_free() with a pointer previously returned ** by sqlite3_malloc() or sqlite3_realloc() releases that memory so ** that it might be reused. ^The sqlite3_free() routine is -** a no-op if is called with a NULL pointer. Passing a NULL pointer +** a no-op if it is called with a NULL pointer. Passing a NULL pointer ** to sqlite3_free() is harmless. After being freed, memory ** should neither be read nor written. Even reading previously freed ** memory might result in a segmentation fault or other severe error. @@ -3548,13 +3571,13 @@ SQLITE_API char *sqlite3_vsnprintf(int,char*,const char*, va_list); ** sqlite3_free(X). ** ^sqlite3_realloc(X,N) returns a pointer to a memory allocation ** of at least N bytes in size or NULL if insufficient memory is available. -** ^If M is the size of the prior allocation, then min(N,M) bytes -** of the prior allocation are copied into the beginning of buffer returned +** ^If M is the size of the prior allocation, then min(N,M) bytes of the +** prior allocation are copied into the beginning of the buffer returned ** by sqlite3_realloc(X,N) and the prior allocation is freed. ** ^If sqlite3_realloc(X,N) returns NULL and N is positive, then the ** prior allocation is not freed. ** -** ^The sqlite3_realloc64(X,N) interfaces works the same as +** ^The sqlite3_realloc64(X,N) interface works the same as ** sqlite3_realloc(X,N) except that N is a 64-bit unsigned integer instead ** of a 32-bit signed integer. ** @@ -3604,7 +3627,7 @@ SQLITE_API sqlite3_uint64 sqlite3_msize(void*); ** was last reset. 
^The values returned by [sqlite3_memory_used()] and ** [sqlite3_memory_highwater()] include any overhead ** added by SQLite in its implementation of [sqlite3_malloc()], -** but not overhead added by the any underlying system library +** but not overhead added by any underlying system library ** routines that [sqlite3_malloc()] may call. ** ** ^The memory high-water mark is reset to the current value of @@ -4056,7 +4079,7 @@ SQLITE_API void sqlite3_progress_handler(sqlite3*, int, int(*)(void*), void*); ** there is no harm in trying.) ** ** ^(

    [SQLITE_OPEN_SHAREDCACHE]
    -**
    The database is opened [shared cache] enabled, overriding +**
    The database is opened with [shared cache] enabled, overriding ** the default shared cache setting provided by ** [sqlite3_enable_shared_cache()].)^ ** The [use of shared cache mode is discouraged] and hence shared cache @@ -4064,7 +4087,7 @@ SQLITE_API void sqlite3_progress_handler(sqlite3*, int, int(*)(void*), void*); ** this option is a no-op. ** ** ^(
    [SQLITE_OPEN_PRIVATECACHE]
    -**
    The database is opened [shared cache] disabled, overriding +**
    The database is opened with [shared cache] disabled, overriding ** the default shared cache setting provided by ** [sqlite3_enable_shared_cache()].)^ ** @@ -4482,7 +4505,7 @@ SQLITE_API void sqlite3_free_filename(sqlite3_filename); ** subsequent calls to other SQLite interface functions.)^ ** ** ^The sqlite3_errstr(E) interface returns the English-language text -** that describes the [result code] E, as UTF-8, or NULL if E is not an +** that describes the [result code] E, as UTF-8, or NULL if E is not a ** result code for which a text error message is available. ** ^(Memory to hold the error message string is managed internally ** and must not be freed by the application)^. @@ -4490,7 +4513,7 @@ SQLITE_API void sqlite3_free_filename(sqlite3_filename); ** ^If the most recent error references a specific token in the input ** SQL, the sqlite3_error_offset() interface returns the byte offset ** of the start of that token. ^The byte offset returned by -** sqlite3_error_offset() assumes that the input SQL is UTF8. +** sqlite3_error_offset() assumes that the input SQL is UTF-8. ** ^If the most recent error does not reference a specific token in the input ** SQL, then the sqlite3_error_offset() function returns -1. ** @@ -4515,6 +4538,34 @@ SQLITE_API const void *sqlite3_errmsg16(sqlite3*); SQLITE_API const char *sqlite3_errstr(int); SQLITE_API int sqlite3_error_offset(sqlite3 *db); +/* +** CAPI3REF: Set Error Codes And Message +** METHOD: sqlite3 +** +** Set the error code of the database handle passed as the first argument +** to errcode, and the error message to a copy of nul-terminated string +** zErrMsg. If zErrMsg is passed NULL, then the error message is set to +** the default message associated with the supplied error code. Subsequent +** calls to [sqlite3_errcode()] and [sqlite3_errmsg()] and similar will +** return the values set by this routine in place of what was previously +** set by SQLite itself. 
+** +** This function returns SQLITE_OK if the error code and error message are +** successfully set, SQLITE_NOMEM if an OOM occurs, and SQLITE_MISUSE if +** the database handle is NULL or invalid. +** +** The error code and message set by this routine remain in effect until +** they are changed, either by another call to this routine or until they are +** changed by SQLite itself to reflect the result of some subsequent +** API call. +** +** This function is intended for use by SQLite extensions or wrappers. The +** idea is that an extension or wrapper can use this routine to set error +** messages and error codes and thus behave more like a core SQLite +** feature from the point of view of an application. +*/ +SQLITE_API int sqlite3_set_errmsg(sqlite3 *db, int errcode, const char *zErrMsg); + /* ** CAPI3REF: Prepared Statement Object ** KEYWORDS: {prepared statement} {prepared statements} @@ -4589,8 +4640,8 @@ SQLITE_API int sqlite3_limit(sqlite3*, int id, int newVal); ** ** These constants define various performance limits ** that can be lowered at run-time using [sqlite3_limit()]. -** The synopsis of the meanings of the various limits is shown below. -** Additional information is available at [limits | Limits in SQLite]. +** A concise description of these limits follows, and additional information +** is available at [limits | Limits in SQLite]. ** **
    ** [[SQLITE_LIMIT_LENGTH]] ^(
    SQLITE_LIMIT_LENGTH
    @@ -4655,7 +4706,7 @@ SQLITE_API int sqlite3_limit(sqlite3*, int id, int newVal); /* ** CAPI3REF: Prepare Flags ** -** These constants define various flags that can be passed into +** These constants define various flags that can be passed into the ** "prepFlags" parameter of the [sqlite3_prepare_v3()] and ** [sqlite3_prepare16_v3()] interfaces. ** @@ -4742,7 +4793,7 @@ SQLITE_API int sqlite3_limit(sqlite3*, int id, int newVal); ** there is a small performance advantage to passing an nByte parameter that ** is the number of bytes in the input string including ** the nul-terminator. -** Note that nByte measure the length of the input in bytes, not +** Note that nByte measures the length of the input in bytes, not ** characters, even for the UTF-16 interfaces. ** ** ^If pzTail is not NULL then *pzTail is made to point to the first byte @@ -4876,7 +4927,7 @@ SQLITE_API int sqlite3_prepare16_v3( ** ** ^The sqlite3_expanded_sql() interface returns NULL if insufficient memory ** is available to hold the result, or if the result would exceed the -** the maximum string length determined by the [SQLITE_LIMIT_LENGTH]. +** maximum string length determined by the [SQLITE_LIMIT_LENGTH]. ** ** ^The [SQLITE_TRACE_SIZE_LIMIT] compile-time option limits the size of ** bound parameter expansions. ^The [SQLITE_OMIT_TRACE] compile-time @@ -5064,7 +5115,7 @@ typedef struct sqlite3_value sqlite3_value; ** ** The context in which an SQL function executes is stored in an ** sqlite3_context object. ^A pointer to an sqlite3_context object -** is always first parameter to [application-defined SQL functions]. +** is always the first parameter to [application-defined SQL functions]. 
** The application-defined SQL function implementation will pass this ** pointer through into calls to [sqlite3_result_int | sqlite3_result()], ** [sqlite3_aggregate_context()], [sqlite3_user_data()], @@ -5188,9 +5239,11 @@ typedef struct sqlite3_context sqlite3_context; ** associated with the pointer P of type T. ^D is either a NULL pointer or ** a pointer to a destructor function for P. ^SQLite will invoke the ** destructor D with a single argument of P when it is finished using -** P. The T parameter should be a static string, preferably a string -** literal. The sqlite3_bind_pointer() routine is part of the -** [pointer passing interface] added for SQLite 3.20.0. +** P, even if the call to sqlite3_bind_pointer() fails. Due to a +** historical design quirk, results are undefined if D is +** SQLITE_TRANSIENT. The T parameter should be a static string, +** preferably a string literal. The sqlite3_bind_pointer() routine is +** part of the [pointer passing interface] added for SQLite 3.20.0. ** ** ^If any of the sqlite3_bind_*() routines are called with a NULL pointer ** for the [prepared statement] or with a prepared statement for which @@ -5801,7 +5854,7 @@ SQLITE_API int sqlite3_column_type(sqlite3_stmt*, int iCol); ** ** ^The sqlite3_finalize() function is called to delete a [prepared statement]. ** ^If the most recent evaluation of the statement encountered no errors -** or if the statement is never been evaluated, then sqlite3_finalize() returns +** or if the statement has never been evaluated, then sqlite3_finalize() returns ** SQLITE_OK. ^If the most recent evaluation of statement S failed, then ** sqlite3_finalize(S) returns the appropriate [error code] or ** [extended error code]. @@ -6033,7 +6086,7 @@ SQLITE_API int sqlite3_create_window_function( /* ** CAPI3REF: Text Encodings ** -** These constant define integer codes that represent the various +** These constants define integer codes that represent the various ** text encodings supported by SQLite. 
*/ #define SQLITE_UTF8 1 /* IMP: R-37514-35566 */ @@ -6125,7 +6178,7 @@ SQLITE_API int sqlite3_create_window_function( ** result. ** Every function that invokes [sqlite3_result_subtype()] should have this ** property. If it does not, then the call to [sqlite3_result_subtype()] -** might become a no-op if the function is used as term in an +** might become a no-op if the function is used as a term in an ** [expression index]. On the other hand, SQL functions that never invoke ** [sqlite3_result_subtype()] should avoid setting this property, as the ** purpose of this property is to disable certain optimizations that are @@ -6252,7 +6305,7 @@ SQLITE_API SQLITE_DEPRECATED int sqlite3_memory_alarm(void(*)(void*,sqlite3_int6 ** sqlite3_value_nochange(X) interface returns true if and only if ** the column corresponding to X is unchanged by the UPDATE operation ** that the xUpdate method call was invoked to implement and if -** and the prior [xColumn] method call that was invoked to extracted +** the prior [xColumn] method call that was invoked to extract ** the value for that column returned without setting a result (probably ** because it queried [sqlite3_vtab_nochange()] and found that the column ** was unchanging). ^Within an [xUpdate] method, any value for which @@ -6525,6 +6578,7 @@ SQLITE_API void sqlite3_set_auxdata(sqlite3_context*, int N, void*, void (*)(voi ** or a NULL pointer if there were no prior calls to ** sqlite3_set_clientdata() with the same values of D and N. ** Names are compared using strcmp() and are thus case sensitive. +** It returns 0 on success and SQLITE_NOMEM on allocation failure. ** ** If P and X are both non-NULL, then the destructor X is invoked with ** argument P on the first of the following occurrences: @@ -9201,9 +9255,18 @@ SQLITE_API int sqlite3_status64( ** ^The sqlite3_db_status() routine returns SQLITE_OK on success and a ** non-zero [error code] on failure. 
** +** ^The sqlite3_db_status64(D,O,C,H,R) routine works exactly the same +** way as the sqlite3_db_status(D,O,C,H,R) routine except that the C and H +** parameters are pointers to 64-bit integers (type: sqlite3_int64) instead +** of pointers to 32-bit integers, which allows larger status values +** to be returned. If a status value exceeds 2,147,483,647 then +** sqlite3_db_status() will truncate the value whereas sqlite3_db_status64() +** will return the full value. +** ** See also: [sqlite3_status()] and [sqlite3_stmt_status()]. */ SQLITE_API int sqlite3_db_status(sqlite3*, int op, int *pCur, int *pHiwtr, int resetFlg); +SQLITE_API int sqlite3_db_status64(sqlite3*,int,sqlite3_int64*,sqlite3_int64*,int); /* ** CAPI3REF: Status Parameters for database connections @@ -9300,6 +9363,10 @@ SQLITE_API int sqlite3_db_status(sqlite3*, int op, int *pCur, int *pHiwtr, int r ** If an IO or other error occurs while writing a page to disk, the effect ** on subsequent SQLITE_DBSTATUS_CACHE_WRITE requests is undefined.)^ ^The ** highwater mark associated with SQLITE_DBSTATUS_CACHE_WRITE is always 0. +**

    +** ^(There is overlap between the quantities measured by this parameter +** (SQLITE_DBSTATUS_CACHE_WRITE) and SQLITE_DBSTATUS_TEMPBUF_SPILL. +** Resetting one will reduce the other.)^ **

    ** ** [[SQLITE_DBSTATUS_CACHE_SPILL]] ^(
    SQLITE_DBSTATUS_CACHE_SPILL
    @@ -9315,6 +9382,18 @@ SQLITE_API int sqlite3_db_status(sqlite3*, int op, int *pCur, int *pHiwtr, int r **
    This parameter returns zero for the current value if and only if ** all foreign key constraints (deferred or immediate) have been ** resolved.)^ ^The highwater mark is always 0. +** +** [[SQLITE_DBSTATUS_TEMPBUF_SPILL]] ^(
    SQLITE_DBSTATUS_TEMPBUF_SPILL
    +**
    ^(This parameter returns the number of bytes written to temporary +** files on disk that could have been kept in memory had sufficient memory +** been available. This value includes writes to intermediate tables that +** are part of complex queries, external sorts that spill to disk, and +** writes to TEMP tables.)^ +** ^The highwater mark is always 0. +**

    +** ^(There is overlap between the quantities measured by this parameter +** (SQLITE_DBSTATUS_TEMPBUF_SPILL) and SQLITE_DBSTATUS_CACHE_WRITE. +** Resetting one will reduce the other.)^ **

    ** */ @@ -9331,7 +9410,8 @@ SQLITE_API int sqlite3_db_status(sqlite3*, int op, int *pCur, int *pHiwtr, int r #define SQLITE_DBSTATUS_DEFERRED_FKS 10 #define SQLITE_DBSTATUS_CACHE_USED_SHARED 11 #define SQLITE_DBSTATUS_CACHE_SPILL 12 -#define SQLITE_DBSTATUS_MAX 12 /* Largest defined DBSTATUS */ +#define SQLITE_DBSTATUS_TEMPBUF_SPILL 13 +#define SQLITE_DBSTATUS_MAX 13 /* Largest defined DBSTATUS */ /* @@ -10096,7 +10176,7 @@ SQLITE_API void sqlite3_log(int iErrCode, const char *zFormat, ...); ** is the number of pages currently in the write-ahead log file, ** including those that were just committed. ** -** The callback function should normally return [SQLITE_OK]. ^If an error +** ^The callback function should normally return [SQLITE_OK]. ^If an error ** code is returned, that error will propagate back up through the ** SQLite code base to cause the statement that provoked the callback ** to report an error, though the commit will have still occurred. If the @@ -10104,13 +10184,26 @@ SQLITE_API void sqlite3_log(int iErrCode, const char *zFormat, ...); ** that does not correspond to any valid SQLite error code, the results ** are undefined. ** -** A single database handle may have at most a single write-ahead log callback -** registered at one time. ^Calling [sqlite3_wal_hook()] replaces any -** previously registered write-ahead log callback. ^The return value is -** a copy of the third parameter from the previous call, if any, or 0. -** ^Note that the [sqlite3_wal_autocheckpoint()] interface and the -** [wal_autocheckpoint pragma] both invoke [sqlite3_wal_hook()] and will -** overwrite any prior [sqlite3_wal_hook()] settings. +** ^A single database handle may have at most a single write-ahead log +** callback registered at one time. ^Calling [sqlite3_wal_hook()] +** replaces the default behavior or previously registered write-ahead +** log callback. +** +** ^The return value is a copy of the third parameter from the +** previous call, if any, or 0. 
+** +** ^The [sqlite3_wal_autocheckpoint()] interface and the +** [wal_autocheckpoint pragma] both invoke [sqlite3_wal_hook()] and +** will overwrite any prior [sqlite3_wal_hook()] settings. +** +** ^If a write-ahead log callback is set using this function then +** [sqlite3_wal_checkpoint_v2()] or [PRAGMA wal_checkpoint] +** should be invoked periodically to keep the write-ahead log file +** from growing without bound. +** +** ^Passing a NULL pointer for the callback disables automatic +** checkpointing entirely. To re-enable the default behavior, call +** sqlite3_wal_autocheckpoint(db,1000) or use [PRAGMA wal_checkpoint]. */ SQLITE_API void *sqlite3_wal_hook( sqlite3*, @@ -10127,7 +10220,7 @@ SQLITE_API void *sqlite3_wal_hook( ** to automatically [checkpoint] ** after committing a transaction if there are N or ** more frames in the [write-ahead log] file. ^Passing zero or -** a negative value as the nFrame parameter disables automatic +** a negative value as the N parameter disables automatic ** checkpoints entirely. ** ** ^The callback registered by this function replaces any existing callback @@ -10143,9 +10236,10 @@ SQLITE_API void *sqlite3_wal_hook( ** ** ^Every new [database connection] defaults to having the auto-checkpoint ** enabled with a threshold of 1000 or [SQLITE_DEFAULT_WAL_AUTOCHECKPOINT] -** pages. The use of this interface -** is only necessary if the default setting is found to be suboptimal -** for a particular application. +** pages. +** +** ^The use of this interface is only necessary if the default setting +** is found to be suboptimal for a particular application. */ SQLITE_API int sqlite3_wal_autocheckpoint(sqlite3 *db, int N); @@ -10210,6 +10304,11 @@ SQLITE_API int sqlite3_wal_checkpoint(sqlite3 *db, const char *zDb); ** ^This mode works the same way as SQLITE_CHECKPOINT_RESTART with the ** addition that it also truncates the log file to zero bytes just prior ** to a successful return. +** +**
    SQLITE_CHECKPOINT_NOOP
    +** ^This mode always checkpoints zero frames. The only reason to invoke +** a NOOP checkpoint is to access the values returned by +** sqlite3_wal_checkpoint_v2() via output parameters *pnLog and *pnCkpt. ** ** ** ^If pnLog is not NULL, then *pnLog is set to the total number of frames in @@ -10280,6 +10379,7 @@ SQLITE_API int sqlite3_wal_checkpoint_v2( ** See the [sqlite3_wal_checkpoint_v2()] documentation for details on the ** meaning of each of these checkpoint modes. */ +#define SQLITE_CHECKPOINT_NOOP -1 /* Do no work at all */ #define SQLITE_CHECKPOINT_PASSIVE 0 /* Do as much as possible w/o blocking */ #define SQLITE_CHECKPOINT_FULL 1 /* Wait for writers, then checkpoint */ #define SQLITE_CHECKPOINT_RESTART 2 /* Like FULL but wait for readers */ @@ -10648,7 +10748,7 @@ SQLITE_API int sqlite3_vtab_in(sqlite3_index_info*, int iCons, int bHandle); **   ){ **   // do something with pVal **   } -**   if( rc!=SQLITE_OK ){ +**   if( rc!=SQLITE_DONE ){ **   // an error has occurred **   } ** )^ @@ -11107,7 +11207,7 @@ typedef struct sqlite3_snapshot { ** The [sqlite3_snapshot_get()] interface is only available when the ** [SQLITE_ENABLE_SNAPSHOT] compile-time option is used. */ -SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_get( +SQLITE_API int sqlite3_snapshot_get( sqlite3 *db, const char *zSchema, sqlite3_snapshot **ppSnapshot @@ -11156,7 +11256,7 @@ SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_get( ** The [sqlite3_snapshot_open()] interface is only available when the ** [SQLITE_ENABLE_SNAPSHOT] compile-time option is used. */ -SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_open( +SQLITE_API int sqlite3_snapshot_open( sqlite3 *db, const char *zSchema, sqlite3_snapshot *pSnapshot @@ -11173,7 +11273,7 @@ SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_open( ** The [sqlite3_snapshot_free()] interface is only available when the ** [SQLITE_ENABLE_SNAPSHOT] compile-time option is used. 
*/ -SQLITE_API SQLITE_EXPERIMENTAL void sqlite3_snapshot_free(sqlite3_snapshot*); +SQLITE_API void sqlite3_snapshot_free(sqlite3_snapshot*); /* ** CAPI3REF: Compare the ages of two snapshot handles. @@ -11200,7 +11300,7 @@ SQLITE_API SQLITE_EXPERIMENTAL void sqlite3_snapshot_free(sqlite3_snapshot*); ** This interface is only available if SQLite is compiled with the ** [SQLITE_ENABLE_SNAPSHOT] option. */ -SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_cmp( +SQLITE_API int sqlite3_snapshot_cmp( sqlite3_snapshot *p1, sqlite3_snapshot *p2 ); @@ -11228,7 +11328,7 @@ SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_cmp( ** This interface is only available if SQLite is compiled with the ** [SQLITE_ENABLE_SNAPSHOT] option. */ -SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_recover(sqlite3 *db, const char *zDb); +SQLITE_API int sqlite3_snapshot_recover(sqlite3 *db, const char *zDb); /* ** CAPI3REF: Serialize a database @@ -11302,12 +11402,13 @@ SQLITE_API unsigned char *sqlite3_serialize( ** ** The sqlite3_deserialize(D,S,P,N,M,F) interface causes the ** [database connection] D to disconnect from database S and then -** reopen S as an in-memory database based on the serialization contained -** in P. The serialized database P is N bytes in size. M is the size of -** the buffer P, which might be larger than N. If M is larger than N, and -** the SQLITE_DESERIALIZE_READONLY bit is not set in F, then SQLite is -** permitted to add content to the in-memory database as long as the total -** size does not exceed M bytes. +** reopen S as an in-memory database based on the serialization +** contained in P. If S is a NULL pointer, the main database is +** used. The serialized database P is N bytes in size. M is the size +** of the buffer P, which might be larger than N. 
If M is larger than +** N, and the SQLITE_DESERIALIZE_READONLY bit is not set in F, then +** SQLite is permitted to add content to the in-memory database as +** long as the total size does not exceed M bytes. ** ** If the SQLITE_DESERIALIZE_FREEONCLOSE bit is set in F, then SQLite will ** invoke sqlite3_free() on the serialization buffer when the database @@ -11374,6 +11475,54 @@ SQLITE_API int sqlite3_deserialize( #define SQLITE_DESERIALIZE_RESIZEABLE 2 /* Resize using sqlite3_realloc64() */ #define SQLITE_DESERIALIZE_READONLY 4 /* Database is read-only */ +/* +** CAPI3REF: Bind array values to the CARRAY table-valued function +** +** The sqlite3_carray_bind(S,I,P,N,F,X) interface binds an array value to +** one of the first argument of the [carray() table-valued function]. The +** S parameter is a pointer to the [prepared statement] that uses the carray() +** functions. I is the parameter index to be bound. P is a pointer to the +** array to be bound, and N is the number of eements in the array. The +** F argument is one of constants [SQLITE_CARRAY_INT32], [SQLITE_CARRAY_INT64], +** [SQLITE_CARRAY_DOUBLE], [SQLITE_CARRAY_TEXT], or [SQLITE_CARRAY_BLOB] to +** indicate the datatype of the array being bound. The X argument is not a +** NULL pointer, then SQLite will invoke the function X on the P parameter +** after it has finished using P, even if the call to +** sqlite3_carray_bind() fails. The special-case finalizer +** SQLITE_TRANSIENT has no effect here. 
+*/ +SQLITE_API int sqlite3_carray_bind( + sqlite3_stmt *pStmt, /* Statement to be bound */ + int i, /* Parameter index */ + void *aData, /* Pointer to array data */ + int nData, /* Number of data elements */ + int mFlags, /* CARRAY flags */ + void (*xDel)(void*) /* Destructor for aData */ +); + +/* +** CAPI3REF: Datatypes for the CARRAY table-valued function +** +** The fifth argument to the [sqlite3_carray_bind()] interface musts be +** one of the following constants, to specify the datatype of the array +** that is being bound into the [carray table-valued function]. +*/ +#define SQLITE_CARRAY_INT32 0 /* Data is 32-bit signed integers */ +#define SQLITE_CARRAY_INT64 1 /* Data is 64-bit signed integers */ +#define SQLITE_CARRAY_DOUBLE 2 /* Data is doubles */ +#define SQLITE_CARRAY_TEXT 3 /* Data is char* */ +#define SQLITE_CARRAY_BLOB 4 /* Data is struct iovec */ + +/* +** Versions of the above #defines that omit the initial SQLITE_, for +** legacy compatibility. +*/ +#define CARRAY_INT32 0 /* Data is 32-bit signed integers */ +#define CARRAY_INT64 1 /* Data is 64-bit signed integers */ +#define CARRAY_DOUBLE 2 /* Data is doubles */ +#define CARRAY_TEXT 3 /* Data is char* */ +#define CARRAY_BLOB 4 /* Data is struct iovec */ + /* ** Undo the hack that converts floating point types to integer for ** builds on processors without floating point support. @@ -12633,14 +12782,32 @@ SQLITE_API void sqlite3changegroup_delete(sqlite3_changegroup*); ** update the "main" database attached to handle db with the changes found in ** the changeset passed via the second and third arguments. ** +** All changes made by these functions are enclosed in a savepoint transaction. +** If any other error (aside from a constraint failure when attempting to +** write to the target database) occurs, then the savepoint transaction is +** rolled back, restoring the target database to its original state, and an +** SQLite error code returned. 
Additionally, starting with version 3.51.0, +** an error code and error message that may be accessed using the +** [sqlite3_errcode()] and [sqlite3_errmsg()] APIs are left in the database +** handle. +** ** The fourth argument (xFilter) passed to these functions is the "filter -** callback". If it is not NULL, then for each table affected by at least one -** change in the changeset, the filter callback is invoked with -** the table name as the second argument, and a copy of the context pointer -** passed as the sixth argument as the first. If the "filter callback" -** returns zero, then no attempt is made to apply any changes to the table. -** Otherwise, if the return value is non-zero or the xFilter argument to -** is NULL, all changes related to the table are attempted. +** callback". This may be passed NULL, in which case all changes in the +** changeset are applied to the database. For sqlite3changeset_apply() and +** sqlite3_changeset_apply_v2(), if it is not NULL, then it is invoked once +** for each table affected by at least one change in the changeset. In this +** case the table name is passed as the second argument, and a copy of +** the context pointer passed as the sixth argument to apply() or apply_v2() +** as the first. If the "filter callback" returns zero, then no attempt is +** made to apply any changes to the table. Otherwise, if the return value is +** non-zero, all changes related to the table are attempted. +** +** For sqlite3_changeset_apply_v3(), the xFilter callback is invoked once +** per change. The second argument in this case is an sqlite3_changeset_iter +** that may be queried using the usual APIs for the details of the current +** change. If the "filter callback" returns zero in this case, then no attempt +** is made to apply the current change. If it returns non-zero, the change +** is applied. ** ** For each table that is not excluded by the filter callback, this function ** tests that the target database contains a compatible table. 
A table is @@ -12661,11 +12828,11 @@ SQLITE_API void sqlite3changegroup_delete(sqlite3_changegroup*); ** one such warning is issued for each table in the changeset. ** ** For each change for which there is a compatible table, an attempt is made -** to modify the table contents according to the UPDATE, INSERT or DELETE -** change. If a change cannot be applied cleanly, the conflict handler -** function passed as the fifth argument to sqlite3changeset_apply() may be -** invoked. A description of exactly when the conflict handler is invoked for -** each type of change is below. +** to modify the table contents according to each UPDATE, INSERT or DELETE +** change that is not excluded by a filter callback. If a change cannot be +** applied cleanly, the conflict handler function passed as the fifth argument +** to sqlite3changeset_apply() may be invoked. A description of exactly when +** the conflict handler is invoked for each type of change is below. ** ** Unlike the xFilter argument, xConflict may not be passed NULL. The results ** of passing anything other than a valid function pointer as the xConflict @@ -12761,12 +12928,6 @@ SQLITE_API void sqlite3changegroup_delete(sqlite3_changegroup*); ** This can be used to further customize the application's conflict ** resolution strategy. ** -** All changes made by these functions are enclosed in a savepoint transaction. -** If any other error (aside from a constraint failure when attempting to -** write to the target database) occurs, then the savepoint transaction is -** rolled back, restoring the target database to its original state, and an -** SQLite error code returned. 
-** ** If the output parameters (ppRebase) and (pnRebase) are non-NULL and ** the input is a changeset (not a patchset), then sqlite3changeset_apply_v2() ** may set (*ppRebase) to point to a "rebase" that may be used with the @@ -12816,6 +12977,23 @@ SQLITE_API int sqlite3changeset_apply_v2( void **ppRebase, int *pnRebase, /* OUT: Rebase data */ int flags /* SESSION_CHANGESETAPPLY_* flags */ ); +SQLITE_API int sqlite3changeset_apply_v3( + sqlite3 *db, /* Apply change to "main" db of this handle */ + int nChangeset, /* Size of changeset in bytes */ + void *pChangeset, /* Changeset blob */ + int(*xFilter)( + void *pCtx, /* Copy of sixth arg to _apply() */ + sqlite3_changeset_iter *p /* Handle describing change */ + ), + int(*xConflict)( + void *pCtx, /* Copy of sixth arg to _apply() */ + int eConflict, /* DATA, MISSING, CONFLICT, CONSTRAINT */ + sqlite3_changeset_iter *p /* Handle describing change and conflict */ + ), + void *pCtx, /* First argument passed to xConflict */ + void **ppRebase, int *pnRebase, /* OUT: Rebase data */ + int flags /* SESSION_CHANGESETAPPLY_* flags */ +); /* ** CAPI3REF: Flags for sqlite3changeset_apply_v2 @@ -13235,6 +13413,23 @@ SQLITE_API int sqlite3changeset_apply_v2_strm( void **ppRebase, int *pnRebase, int flags ); +SQLITE_API int sqlite3changeset_apply_v3_strm( + sqlite3 *db, /* Apply change to "main" db of this handle */ + int (*xInput)(void *pIn, void *pData, int *pnData), /* Input function */ + void *pIn, /* First arg for xInput */ + int(*xFilter)( + void *pCtx, /* Copy of sixth arg to _apply() */ + sqlite3_changeset_iter *p + ), + int(*xConflict)( + void *pCtx, /* Copy of sixth arg to _apply() */ + int eConflict, /* DATA, MISSING, CONFLICT, CONSTRAINT */ + sqlite3_changeset_iter *p /* Handle describing change and conflict */ + ), + void *pCtx, /* First argument passed to xConflict */ + void **ppRebase, int *pnRebase, + int flags +); SQLITE_API int sqlite3changeset_concat_strm( int (*xInputA)(void *pIn, void *pData, int *pnData), 
void *pInA, @@ -14311,7 +14506,7 @@ struct fts5_api { ** Maximum number of pages in one database file. ** ** This is really just the default value for the max_page_count pragma. -** This value can be lowered (or raised) at run-time using that the +** This value can be lowered (or raised) at run-time using the ** max_page_count macro. */ #ifndef SQLITE_MAX_PAGE_COUNT @@ -15179,7 +15374,7 @@ SQLITE_PRIVATE void sqlite3HashClear(Hash*); ** ourselves. */ #ifndef offsetof -#define offsetof(STRUCTURE,FIELD) ((size_t)((char*)&((STRUCTURE*)0)->FIELD)) +# define offsetof(ST,M) ((size_t)((char*)&((ST*)0)->M - (char*)0)) #endif /* @@ -15567,6 +15762,8 @@ SQLITE_PRIVATE u32 sqlite3TreeTrace; ** 0x00020000 Transform DISTINCT into GROUP BY ** 0x00040000 SELECT tree dump after all code has been generated ** 0x00080000 NOT NULL strength reduction +** 0x00100000 Pointers are all shown as zero +** 0x00200000 EXISTS-to-JOIN optimization */ /* @@ -15611,6 +15808,7 @@ SQLITE_PRIVATE u32 sqlite3WhereTrace; ** 0x00020000 Show WHERE terms returned from whereScanNext() ** 0x00040000 Solver overview messages ** 0x00080000 Star-query heuristic +** 0x00100000 Pointers are all shown as zero */ @@ -15683,7 +15881,7 @@ struct BusyHandler { ** pointer will work here as long as it is distinct from SQLITE_STATIC ** and SQLITE_TRANSIENT. */ -#define SQLITE_DYNAMIC ((sqlite3_destructor_type)sqlite3OomClear) +#define SQLITE_DYNAMIC ((sqlite3_destructor_type)sqlite3RowSetClear) /* ** When SQLITE_OMIT_WSD is defined, it means that the target platform does @@ -15904,8 +16102,8 @@ typedef int VList; ** must provide its own VFS implementation together with sqlite3_os_init() ** and sqlite3_os_end() routines. 
*/ -#if !defined(SQLITE_OS_KV) && !defined(SQLITE_OS_OTHER) && \ - !defined(SQLITE_OS_UNIX) && !defined(SQLITE_OS_WIN) +#if SQLITE_OS_KV+1<=1 && SQLITE_OS_OTHER+1<=1 && \ + SQLITE_OS_WIN+1<=1 && SQLITE_OS_UNIX+1<=1 # if defined(_WIN32) || defined(WIN32) || defined(__CYGWIN__) || \ defined(__MINGW32__) || defined(__BORLANDC__) # define SQLITE_OS_WIN 1 @@ -16751,6 +16949,7 @@ struct BtreePayload { SQLITE_PRIVATE int sqlite3BtreeInsert(BtCursor*, const BtreePayload *pPayload, int flags, int seekResult); SQLITE_PRIVATE int sqlite3BtreeFirst(BtCursor*, int *pRes); +SQLITE_PRIVATE int sqlite3BtreeIsEmpty(BtCursor *pCur, int *pRes); SQLITE_PRIVATE int sqlite3BtreeLast(BtCursor*, int *pRes); SQLITE_PRIVATE int sqlite3BtreeNext(BtCursor*, int flags); SQLITE_PRIVATE int sqlite3BtreeEof(BtCursor*); @@ -17084,20 +17283,20 @@ typedef struct VdbeOpList VdbeOpList; #define OP_SorterSort 34 /* jump */ #define OP_Sort 35 /* jump */ #define OP_Rewind 36 /* jump0 */ -#define OP_SorterNext 37 /* jump */ -#define OP_Prev 38 /* jump */ -#define OP_Next 39 /* jump */ -#define OP_IdxLE 40 /* jump, synopsis: key=r[P3@P4] */ -#define OP_IdxGT 41 /* jump, synopsis: key=r[P3@P4] */ -#define OP_IdxLT 42 /* jump, synopsis: key=r[P3@P4] */ +#define OP_IfEmpty 37 /* jump, synopsis: if( empty(P1) ) goto P2 */ +#define OP_SorterNext 38 /* jump */ +#define OP_Prev 39 /* jump */ +#define OP_Next 40 /* jump */ +#define OP_IdxLE 41 /* jump, synopsis: key=r[P3@P4] */ +#define OP_IdxGT 42 /* jump, synopsis: key=r[P3@P4] */ #define OP_Or 43 /* same as TK_OR, synopsis: r[P3]=(r[P1] || r[P2]) */ #define OP_And 44 /* same as TK_AND, synopsis: r[P3]=(r[P1] && r[P2]) */ -#define OP_IdxGE 45 /* jump, synopsis: key=r[P3@P4] */ -#define OP_RowSetRead 46 /* jump, synopsis: r[P3]=rowset(P1) */ -#define OP_RowSetTest 47 /* jump, synopsis: if r[P3] in rowset(P1) goto P2 */ -#define OP_Program 48 /* jump0 */ -#define OP_FkIfZero 49 /* jump, synopsis: if fkctr[P1]==0 goto P2 */ -#define OP_IfPos 50 /* jump, synopsis: 
if r[P1]>0 then r[P1]-=P3, goto P2 */ +#define OP_IdxLT 45 /* jump, synopsis: key=r[P3@P4] */ +#define OP_IdxGE 46 /* jump, synopsis: key=r[P3@P4] */ +#define OP_RowSetRead 47 /* jump, synopsis: r[P3]=rowset(P1) */ +#define OP_RowSetTest 48 /* jump, synopsis: if r[P3] in rowset(P1) goto P2 */ +#define OP_Program 49 /* jump0 */ +#define OP_FkIfZero 50 /* jump, synopsis: if fkctr[P1]==0 goto P2 */ #define OP_IsNull 51 /* jump, same as TK_ISNULL, synopsis: if r[P1]==NULL goto P2 */ #define OP_NotNull 52 /* jump, same as TK_NOTNULL, synopsis: if r[P1]!=NULL goto P2 */ #define OP_Ne 53 /* jump, same as TK_NE, synopsis: IF r[P3]!=r[P1] */ @@ -17107,49 +17306,49 @@ typedef struct VdbeOpList VdbeOpList; #define OP_Lt 57 /* jump, same as TK_LT, synopsis: IF r[P3]=r[P1] */ #define OP_ElseEq 59 /* jump, same as TK_ESCAPE */ -#define OP_IfNotZero 60 /* jump, synopsis: if r[P1]!=0 then r[P1]--, goto P2 */ -#define OP_DecrJumpZero 61 /* jump, synopsis: if (--r[P1])==0 goto P2 */ -#define OP_IncrVacuum 62 /* jump */ -#define OP_VNext 63 /* jump */ -#define OP_Filter 64 /* jump, synopsis: if key(P3@P4) not in filter(P1) goto P2 */ -#define OP_PureFunc 65 /* synopsis: r[P3]=func(r[P2@NP]) */ -#define OP_Function 66 /* synopsis: r[P3]=func(r[P2@NP]) */ -#define OP_Return 67 -#define OP_EndCoroutine 68 -#define OP_HaltIfNull 69 /* synopsis: if r[P3]=null halt */ -#define OP_Halt 70 -#define OP_Integer 71 /* synopsis: r[P2]=P1 */ -#define OP_Int64 72 /* synopsis: r[P2]=P4 */ -#define OP_String 73 /* synopsis: r[P2]='P4' (len=P1) */ -#define OP_BeginSubrtn 74 /* synopsis: r[P2]=NULL */ -#define OP_Null 75 /* synopsis: r[P2..P3]=NULL */ -#define OP_SoftNull 76 /* synopsis: r[P1]=NULL */ -#define OP_Blob 77 /* synopsis: r[P2]=P4 (len=P1) */ -#define OP_Variable 78 /* synopsis: r[P2]=parameter(P1) */ -#define OP_Move 79 /* synopsis: r[P2@P3]=r[P1@P3] */ -#define OP_Copy 80 /* synopsis: r[P2@P3+1]=r[P1@P3+1] */ -#define OP_SCopy 81 /* synopsis: r[P2]=r[P1] */ -#define OP_IntCopy 82 /* 
synopsis: r[P2]=r[P1] */ -#define OP_FkCheck 83 -#define OP_ResultRow 84 /* synopsis: output=r[P1@P2] */ -#define OP_CollSeq 85 -#define OP_AddImm 86 /* synopsis: r[P1]=r[P1]+P2 */ -#define OP_RealAffinity 87 -#define OP_Cast 88 /* synopsis: affinity(r[P1]) */ -#define OP_Permutation 89 -#define OP_Compare 90 /* synopsis: r[P1@P3] <-> r[P2@P3] */ -#define OP_IsTrue 91 /* synopsis: r[P2] = coalesce(r[P1]==TRUE,P3) ^ P4 */ -#define OP_ZeroOrNull 92 /* synopsis: r[P2] = 0 OR NULL */ -#define OP_Offset 93 /* synopsis: r[P3] = sqlite_offset(P1) */ -#define OP_Column 94 /* synopsis: r[P3]=PX cursor P1 column P2 */ -#define OP_TypeCheck 95 /* synopsis: typecheck(r[P1@P2]) */ -#define OP_Affinity 96 /* synopsis: affinity(r[P1@P2]) */ -#define OP_MakeRecord 97 /* synopsis: r[P3]=mkrec(r[P1@P2]) */ -#define OP_Count 98 /* synopsis: r[P2]=count() */ -#define OP_ReadCookie 99 -#define OP_SetCookie 100 -#define OP_ReopenIdx 101 /* synopsis: root=P2 iDb=P3 */ -#define OP_OpenRead 102 /* synopsis: root=P2 iDb=P3 */ +#define OP_IfPos 60 /* jump, synopsis: if r[P1]>0 then r[P1]-=P3, goto P2 */ +#define OP_IfNotZero 61 /* jump, synopsis: if r[P1]!=0 then r[P1]--, goto P2 */ +#define OP_DecrJumpZero 62 /* jump, synopsis: if (--r[P1])==0 goto P2 */ +#define OP_IncrVacuum 63 /* jump */ +#define OP_VNext 64 /* jump */ +#define OP_Filter 65 /* jump, synopsis: if key(P3@P4) not in filter(P1) goto P2 */ +#define OP_PureFunc 66 /* synopsis: r[P3]=func(r[P2@NP]) */ +#define OP_Function 67 /* synopsis: r[P3]=func(r[P2@NP]) */ +#define OP_Return 68 +#define OP_EndCoroutine 69 +#define OP_HaltIfNull 70 /* synopsis: if r[P3]=null halt */ +#define OP_Halt 71 +#define OP_Integer 72 /* synopsis: r[P2]=P1 */ +#define OP_Int64 73 /* synopsis: r[P2]=P4 */ +#define OP_String 74 /* synopsis: r[P2]='P4' (len=P1) */ +#define OP_BeginSubrtn 75 /* synopsis: r[P2]=NULL */ +#define OP_Null 76 /* synopsis: r[P2..P3]=NULL */ +#define OP_SoftNull 77 /* synopsis: r[P1]=NULL */ +#define OP_Blob 78 /* synopsis: 
r[P2]=P4 (len=P1) */ +#define OP_Variable 79 /* synopsis: r[P2]=parameter(P1) */ +#define OP_Move 80 /* synopsis: r[P2@P3]=r[P1@P3] */ +#define OP_Copy 81 /* synopsis: r[P2@P3+1]=r[P1@P3+1] */ +#define OP_SCopy 82 /* synopsis: r[P2]=r[P1] */ +#define OP_IntCopy 83 /* synopsis: r[P2]=r[P1] */ +#define OP_FkCheck 84 +#define OP_ResultRow 85 /* synopsis: output=r[P1@P2] */ +#define OP_CollSeq 86 +#define OP_AddImm 87 /* synopsis: r[P1]=r[P1]+P2 */ +#define OP_RealAffinity 88 +#define OP_Cast 89 /* synopsis: affinity(r[P1]) */ +#define OP_Permutation 90 +#define OP_Compare 91 /* synopsis: r[P1@P3] <-> r[P2@P3] */ +#define OP_IsTrue 92 /* synopsis: r[P2] = coalesce(r[P1]==TRUE,P3) ^ P4 */ +#define OP_ZeroOrNull 93 /* synopsis: r[P2] = 0 OR NULL */ +#define OP_Offset 94 /* synopsis: r[P3] = sqlite_offset(P1) */ +#define OP_Column 95 /* synopsis: r[P3]=PX cursor P1 column P2 */ +#define OP_TypeCheck 96 /* synopsis: typecheck(r[P1@P2]) */ +#define OP_Affinity 97 /* synopsis: affinity(r[P1@P2]) */ +#define OP_MakeRecord 98 /* synopsis: r[P3]=mkrec(r[P1@P2]) */ +#define OP_Count 99 /* synopsis: r[P2]=count() */ +#define OP_ReadCookie 100 +#define OP_SetCookie 101 +#define OP_ReopenIdx 102 /* synopsis: root=P2 iDb=P3 */ #define OP_BitAnd 103 /* same as TK_BITAND, synopsis: r[P3]=r[P1]&r[P2] */ #define OP_BitOr 104 /* same as TK_BITOR, synopsis: r[P3]=r[P1]|r[P2] */ #define OP_ShiftLeft 105 /* same as TK_LSHIFT, synopsis: r[P3]=r[P2]<0 then r[P2]=r[P1]+max(0,r[P3]) else r[P2]=(-1) */ -#define OP_AggInverse 161 /* synopsis: accum=r[P3] inverse(r[P2@P5]) */ -#define OP_AggStep 162 /* synopsis: accum=r[P3] step(r[P2@P5]) */ -#define OP_AggStep1 163 /* synopsis: accum=r[P3] step(r[P2@P5]) */ -#define OP_AggValue 164 /* synopsis: r[P3]=value N=P2 */ -#define OP_AggFinal 165 /* synopsis: accum=r[P1] N=P2 */ -#define OP_Expire 166 -#define OP_CursorLock 167 -#define OP_CursorUnlock 168 -#define OP_TableLock 169 /* synopsis: iDb=P1 root=P2 write=P3 */ -#define OP_VBegin 170 -#define 
OP_VCreate 171 -#define OP_VDestroy 172 -#define OP_VOpen 173 -#define OP_VCheck 174 -#define OP_VInitIn 175 /* synopsis: r[P2]=ValueList(P1,P3) */ -#define OP_VColumn 176 /* synopsis: r[P3]=vcolumn(P2) */ -#define OP_VRename 177 -#define OP_Pagecount 178 -#define OP_MaxPgcnt 179 -#define OP_ClrSubtype 180 /* synopsis: r[P1].subtype = 0 */ -#define OP_GetSubtype 181 /* synopsis: r[P2] = r[P1].subtype */ -#define OP_SetSubtype 182 /* synopsis: r[P2].subtype = r[P1] */ -#define OP_FilterAdd 183 /* synopsis: filter(P1) += key(P3@P4) */ -#define OP_Trace 184 -#define OP_CursorHint 185 -#define OP_ReleaseReg 186 /* synopsis: release r[P1@P2] mask P3 */ -#define OP_Noop 187 -#define OP_Explain 188 -#define OP_Abortable 189 +#define OP_DropTrigger 155 +#define OP_IntegrityCk 156 +#define OP_RowSetAdd 157 /* synopsis: rowset(P1)=r[P2] */ +#define OP_Param 158 +#define OP_FkCounter 159 /* synopsis: fkctr[P1]+=P2 */ +#define OP_MemMax 160 /* synopsis: r[P1]=max(r[P1],r[P2]) */ +#define OP_OffsetLimit 161 /* synopsis: if r[P1]>0 then r[P2]=r[P1]+max(0,r[P3]) else r[P2]=(-1) */ +#define OP_AggInverse 162 /* synopsis: accum=r[P3] inverse(r[P2@P5]) */ +#define OP_AggStep 163 /* synopsis: accum=r[P3] step(r[P2@P5]) */ +#define OP_AggStep1 164 /* synopsis: accum=r[P3] step(r[P2@P5]) */ +#define OP_AggValue 165 /* synopsis: r[P3]=value N=P2 */ +#define OP_AggFinal 166 /* synopsis: accum=r[P1] N=P2 */ +#define OP_Expire 167 +#define OP_CursorLock 168 +#define OP_CursorUnlock 169 +#define OP_TableLock 170 /* synopsis: iDb=P1 root=P2 write=P3 */ +#define OP_VBegin 171 +#define OP_VCreate 172 +#define OP_VDestroy 173 +#define OP_VOpen 174 +#define OP_VCheck 175 +#define OP_VInitIn 176 /* synopsis: r[P2]=ValueList(P1,P3) */ +#define OP_VColumn 177 /* synopsis: r[P3]=vcolumn(P2) */ +#define OP_VRename 178 +#define OP_Pagecount 179 +#define OP_MaxPgcnt 180 +#define OP_ClrSubtype 181 /* synopsis: r[P1].subtype = 0 */ +#define OP_GetSubtype 182 /* synopsis: r[P2] = r[P1].subtype */ +#define 
OP_SetSubtype 183 /* synopsis: r[P2].subtype = r[P1] */ +#define OP_FilterAdd 184 /* synopsis: filter(P1) += key(P3@P4) */ +#define OP_Trace 185 +#define OP_CursorHint 186 +#define OP_ReleaseReg 187 /* synopsis: release r[P1@P2] mask P3 */ +#define OP_Noop 188 +#define OP_Explain 189 +#define OP_Abortable 190 /* Properties such as "out2" or "jump" that are specified in ** comments following the "case" for each opcode in the vdbe.c @@ -17255,26 +17455,26 @@ typedef struct VdbeOpList VdbeOpList; /* 8 */ 0x81, 0x01, 0x01, 0x81, 0x83, 0x83, 0x01, 0x01,\ /* 16 */ 0x03, 0x03, 0x01, 0x12, 0x01, 0xc9, 0xc9, 0xc9,\ /* 24 */ 0xc9, 0x01, 0x49, 0x49, 0x49, 0x49, 0xc9, 0x49,\ -/* 32 */ 0xc1, 0x01, 0x41, 0x41, 0xc1, 0x01, 0x41, 0x41,\ -/* 40 */ 0x41, 0x41, 0x41, 0x26, 0x26, 0x41, 0x23, 0x0b,\ -/* 48 */ 0x81, 0x01, 0x03, 0x03, 0x03, 0x0b, 0x0b, 0x0b,\ -/* 56 */ 0x0b, 0x0b, 0x0b, 0x01, 0x03, 0x03, 0x01, 0x41,\ -/* 64 */ 0x01, 0x00, 0x00, 0x02, 0x02, 0x08, 0x00, 0x10,\ -/* 72 */ 0x10, 0x10, 0x00, 0x10, 0x00, 0x10, 0x10, 0x00,\ -/* 80 */ 0x00, 0x10, 0x10, 0x00, 0x00, 0x00, 0x02, 0x02,\ -/* 88 */ 0x02, 0x00, 0x00, 0x12, 0x1e, 0x20, 0x40, 0x00,\ -/* 96 */ 0x00, 0x00, 0x10, 0x10, 0x00, 0x40, 0x40, 0x26,\ +/* 32 */ 0xc1, 0x01, 0x41, 0x41, 0xc1, 0x01, 0x01, 0x41,\ +/* 40 */ 0x41, 0x41, 0x41, 0x26, 0x26, 0x41, 0x41, 0x23,\ +/* 48 */ 0x0b, 0x81, 0x01, 0x03, 0x03, 0x0b, 0x0b, 0x0b,\ +/* 56 */ 0x0b, 0x0b, 0x0b, 0x01, 0x03, 0x03, 0x03, 0x01,\ +/* 64 */ 0x41, 0x01, 0x00, 0x00, 0x02, 0x02, 0x08, 0x00,\ +/* 72 */ 0x10, 0x10, 0x10, 0x00, 0x10, 0x00, 0x10, 0x10,\ +/* 80 */ 0x00, 0x00, 0x10, 0x10, 0x00, 0x00, 0x00, 0x02,\ +/* 88 */ 0x02, 0x02, 0x00, 0x00, 0x12, 0x1e, 0x20, 0x40,\ +/* 96 */ 0x00, 0x00, 0x00, 0x10, 0x10, 0x00, 0x40, 0x26,\ /* 104 */ 0x26, 0x26, 0x26, 0x26, 0x26, 0x26, 0x26, 0x26,\ -/* 112 */ 0x26, 0x00, 0x40, 0x12, 0x40, 0x40, 0x10, 0x00,\ -/* 120 */ 0x00, 0x00, 0x40, 0x00, 0x40, 0x40, 0x10, 0x10,\ -/* 128 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x50,\ -/* 136 */ 0x00, 0x40, 
0x04, 0x04, 0x00, 0x40, 0x50, 0x40,\ -/* 144 */ 0x10, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00,\ -/* 152 */ 0x00, 0x00, 0x10, 0x00, 0x06, 0x10, 0x00, 0x04,\ -/* 160 */ 0x1a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\ -/* 168 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x10, 0x50,\ -/* 176 */ 0x40, 0x00, 0x10, 0x10, 0x02, 0x12, 0x12, 0x00,\ -/* 184 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,} +/* 112 */ 0x26, 0x40, 0x00, 0x12, 0x40, 0x40, 0x10, 0x40,\ +/* 120 */ 0x00, 0x00, 0x00, 0x40, 0x00, 0x40, 0x40, 0x10,\ +/* 128 */ 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00,\ +/* 136 */ 0x50, 0x00, 0x40, 0x04, 0x04, 0x00, 0x40, 0x50,\ +/* 144 */ 0x40, 0x10, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00,\ +/* 152 */ 0x00, 0x00, 0x10, 0x00, 0x00, 0x06, 0x10, 0x00,\ +/* 160 */ 0x04, 0x1a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\ +/* 168 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x10,\ +/* 176 */ 0x50, 0x40, 0x00, 0x10, 0x10, 0x02, 0x12, 0x12,\ +/* 184 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,} /* The resolve3P2Values() routine is able to run faster if it knows ** the value of the largest JUMP opcode. The smaller the maximum @@ -17282,7 +17482,7 @@ typedef struct VdbeOpList VdbeOpList; ** generated this include file strives to group all JUMP opcodes ** together near the beginning of the list. 
*/ -#define SQLITE_MX_JUMP_OPCODE 64 /* Maximum JUMP opcode */ +#define SQLITE_MX_JUMP_OPCODE 65 /* Maximum JUMP opcode */ /************** End of opcodes.h *********************************************/ /************** Continuing where we left off in vdbe.h ***********************/ @@ -17405,8 +17605,11 @@ SQLITE_PRIVATE char *sqlite3VdbeExpandSql(Vdbe*, const char*); #endif SQLITE_PRIVATE int sqlite3MemCompare(const Mem*, const Mem*, const CollSeq*); SQLITE_PRIVATE int sqlite3BlobCompare(const Mem*, const Mem*); +#ifdef SQLITE_ENABLE_PERCENTILE +SQLITE_PRIVATE const char *sqlite3VdbeFuncName(const sqlite3_context*); +#endif -SQLITE_PRIVATE void sqlite3VdbeRecordUnpack(KeyInfo*,int,const void*,UnpackedRecord*); +SQLITE_PRIVATE void sqlite3VdbeRecordUnpack(int,const void*,UnpackedRecord*); SQLITE_PRIVATE int sqlite3VdbeRecordCompare(int,const void*,UnpackedRecord*); SQLITE_PRIVATE int sqlite3VdbeRecordCompareWithSkip(int, const void *, UnpackedRecord *, int); SQLITE_PRIVATE UnpackedRecord *sqlite3VdbeAllocUnpackedRecord(KeyInfo*); @@ -17419,7 +17622,9 @@ SQLITE_PRIVATE int sqlite3VdbeHasSubProgram(Vdbe*); SQLITE_PRIVATE void sqlite3MemSetArrayInt64(sqlite3_value *aMem, int iIdx, i64 val); +#ifndef SQLITE_OMIT_DATETIME_FUNCS SQLITE_PRIVATE int sqlite3NotPureFunc(sqlite3_context*); +#endif #ifdef SQLITE_ENABLE_BYTECODE_VTAB SQLITE_PRIVATE int sqlite3VdbeBytecodeVtabInit(sqlite3*); #endif @@ -18075,7 +18280,7 @@ struct sqlite3 { u8 iDb; /* Which db file is being initialized */ u8 busy; /* TRUE if currently initializing */ unsigned orphanTrigger : 1; /* Last statement is orphaned TEMP trigger */ - unsigned imposterTable : 1; /* Building an imposter table */ + unsigned imposterTable : 2; /* Building an imposter table */ unsigned reopenMemdb : 1; /* ATTACH is really a reopen using MemDB */ const char **azInit; /* "type", "name", and "tbl_name" columns */ } init; @@ -18158,6 +18363,7 @@ struct sqlite3 { i64 nDeferredImmCons; /* Net deferred immediate constraints */ int 
*pnBytesFreed; /* If not NULL, increment this in DbFree() */ DbClientData *pDbData; /* sqlite3_set_clientdata() content */ + u64 nSpill; /* TEMP content spilled to disk */ #ifdef SQLITE_ENABLE_UNLOCK_NOTIFY /* The following variables are all protected by the STATIC_MAIN ** mutex, not by sqlite3.mutex. They are used by code in notify.c. @@ -18301,6 +18507,7 @@ struct sqlite3 { #define SQLITE_OnePass 0x08000000 /* Single-pass DELETE and UPDATE */ #define SQLITE_OrderBySubq 0x10000000 /* ORDER BY in subquery helps outer */ #define SQLITE_StarQuery 0x20000000 /* Heurists for star queries */ +#define SQLITE_ExistsToJoin 0x40000000 /* The EXISTS-to-JOIN optimization */ #define SQLITE_AllOpts 0xffffffff /* All optimizations */ /* @@ -18539,7 +18746,7 @@ struct FuncDestructor { #define STR_FUNCTION(zName, nArg, pArg, bNC, xFunc) \ {nArg, SQLITE_FUNC_BUILTIN|\ SQLITE_FUNC_SLOCHNG|SQLITE_UTF8|(bNC*SQLITE_FUNC_NEEDCOLL), \ - pArg, 0, xFunc, 0, 0, 0, #zName, } + pArg, 0, xFunc, 0, 0, 0, #zName, {0} } #define LIKEFUNC(zName, nArg, arg, flags) \ {nArg, SQLITE_FUNC_BUILTIN|SQLITE_FUNC_CONSTANT|SQLITE_UTF8|flags, \ (void *)arg, 0, likeFunc, 0, 0, 0, #zName, {0} } @@ -18867,6 +19074,7 @@ struct Table { #define TF_Ephemeral 0x00004000 /* An ephemeral table */ #define TF_Eponymous 0x00008000 /* An eponymous virtual table */ #define TF_Strict 0x00010000 /* STRICT mode */ +#define TF_Imposter 0x00020000 /* An imposter table */ /* ** Allowed values for Table.eTabType @@ -19022,9 +19230,15 @@ struct FKey { ** argument to sqlite3VdbeKeyCompare and is used to control the ** comparison of the two index keys. ** -** Note that aSortOrder[] and aColl[] have nField+1 slots. There -** are nField slots for the columns of an index then one extra slot -** for the rowid at the end. +** The aSortOrder[] and aColl[] arrays have nAllField slots each. There +** are nKeyField slots for the columns of an index then extra slots +** for the rowid or key at the end. 
The aSortOrder array is located after +** the aColl[] array. +** +** If SQLITE_ENABLE_PREUPDATE_HOOK is defined, then aSortFlags might be NULL +** to indicate that this object is for use by a preupdate hook. When aSortFlags +** is NULL, then nAllField is uninitialized and no space is allocated for +** aColl[], so those fields may not be used. */ struct KeyInfo { u32 nRef; /* Number of references to this KeyInfo object */ @@ -19036,9 +19250,18 @@ struct KeyInfo { CollSeq *aColl[FLEXARRAY]; /* Collating sequence for each term of the key */ }; -/* The size (in bytes) of a KeyInfo object with up to N fields */ +/* The size (in bytes) of a KeyInfo object with up to N fields. This includes +** the main body of the KeyInfo object and the aColl[] array of N elements, +** but does not count the memory used to hold aSortFlags[]. */ #define SZ_KEYINFO(N) (offsetof(KeyInfo,aColl) + (N)*sizeof(CollSeq*)) +/* The size of a bare KeyInfo with no aColl[] entries */ +#if FLEXARRAY+1 > 1 +# define SZ_KEYINFO_0 offsetof(KeyInfo,aColl) +#else +# define SZ_KEYINFO_0 sizeof(KeyInfo) +#endif + /* ** Allowed bit values for entries in the KeyInfo.aSortFlags[] array. */ @@ -19057,9 +19280,8 @@ struct KeyInfo { ** ** An instance of this object serves as a "key" for doing a search on ** an index b+tree. The goal of the search is to find the entry that -** is closed to the key described by this object. This object might hold -** just a prefix of the key. The number of fields is given by -** pKeyInfo->nField. +** is closest to the key described by this object. This object might hold +** just a prefix of the key. The number of fields is given by nField. ** ** The r1 and r2 fields are the values to return if this key is less than ** or greater than a key in the btree, respectively. These are normally @@ -19069,7 +19291,7 @@ struct KeyInfo { ** The key comparison functions actually return default_rc when they find ** an equals comparison. default_rc can be -1, 0, or +1. 
If there are ** multiple entries in the b-tree with the same key (when only looking -** at the first pKeyInfo->nFields,) then default_rc can be set to -1 to +** at the first nField elements) then default_rc can be set to -1 to ** cause the search to find the last match, or +1 to cause the search to ** find the first match. ** @@ -19081,8 +19303,8 @@ struct KeyInfo { ** b-tree. */ struct UnpackedRecord { - KeyInfo *pKeyInfo; /* Collation and sort-order information */ - Mem *aMem; /* Values */ + KeyInfo *pKeyInfo; /* Comparison info for the index that is unpacked */ + Mem *aMem; /* Values for columns of the index */ union { char *z; /* Cache of aMem[0].z for vdbeRecordCompareString() */ i64 i; /* Cache of aMem[0].u.i for vdbeRecordCompareInt() */ @@ -19731,6 +19953,7 @@ struct SrcItem { unsigned rowidUsed :1; /* The ROWID of this table is referenced */ unsigned fixedSchema :1; /* Uses u4.pSchema, not u4.zDatabase */ unsigned hadSchema :1; /* Had u4.zDatabase before u4.pSchema */ + unsigned fromExists :1; /* Comes from WHERE EXISTS(...) */ } fg; int iCursor; /* The VDBE cursor number used to access this table */ Bitmask colUsed; /* Bit N set if column N used. 
Details above for N>62 */ @@ -20018,6 +20241,7 @@ struct Select { #define SF_OrderByReqd 0x8000000 /* The ORDER BY clause may not be omitted */ #define SF_UpdateFrom 0x10000000 /* Query originates with UPDATE FROM */ #define SF_Correlated 0x20000000 /* True if references the outer context */ +#define SF_OnToWhere 0x40000000 /* One or more ON clauses moved to WHERE */ /* True if SrcItem X is a subquery that has SF_NestedFrom */ #define IsNestedFrom(X) \ @@ -20261,6 +20485,7 @@ struct Parse { u8 disableLookaside; /* Number of times lookaside has been disabled */ u8 prepFlags; /* SQLITE_PREPARE_* flags */ u8 withinRJSubrtn; /* Nesting level for RIGHT JOIN body subroutines */ + u8 bHasExists; /* Has a correlated "EXISTS (SELECT ....)" expression */ u8 mSubrtnSig; /* mini Bloom filter on available SubrtnSig.selId */ u8 eTriggerOp; /* TK_UPDATE, TK_INSERT or TK_DELETE */ u8 bReturning; /* Coding a RETURNING trigger */ @@ -20770,6 +20995,7 @@ struct Walker { SrcItem *pSrcItem; /* A single FROM clause item */ DbFixer *pFix; /* See sqlite3FixSelect() */ Mem *aMem; /* See sqlite3BtreeCursorHint() */ + struct CheckOnCtx *pCheckOnCtx; /* See selectCheckOnClauses() */ } u; }; @@ -21257,6 +21483,7 @@ SQLITE_PRIVATE void sqlite3ShowTriggerList(const Trigger*); SQLITE_PRIVATE void sqlite3ShowWindow(const Window*); SQLITE_PRIVATE void sqlite3ShowWinFunc(const Window*); #endif +SQLITE_PRIVATE void sqlite3ShowBitvec(Bitvec*); #endif SQLITE_PRIVATE void sqlite3SetString(char **, sqlite3*, const char*); @@ -21573,13 +21800,17 @@ SQLITE_PRIVATE void sqlite3RegisterDateTimeFunctions(void); SQLITE_PRIVATE void sqlite3RegisterJsonFunctions(void); SQLITE_PRIVATE void sqlite3RegisterPerConnectionBuiltinFunctions(sqlite3*); #if !defined(SQLITE_OMIT_VIRTUALTABLE) && !defined(SQLITE_OMIT_JSON) -SQLITE_PRIVATE int sqlite3JsonTableFunctions(sqlite3*); +SQLITE_PRIVATE Module *sqlite3JsonVtabRegister(sqlite3*,const char*); #endif SQLITE_PRIVATE int sqlite3SafetyCheckOk(sqlite3*); SQLITE_PRIVATE int 
sqlite3SafetyCheckSickOrOk(sqlite3*); SQLITE_PRIVATE void sqlite3ChangeCookie(Parse*, int); SQLITE_PRIVATE With *sqlite3WithDup(sqlite3 *db, With *p); +#if !defined(SQLITE_OMIT_VIRTUALTABLE) && defined(SQLITE_ENABLE_CARRAY) +SQLITE_PRIVATE Module *sqlite3CarrayRegister(sqlite3*); +#endif + #if !defined(SQLITE_OMIT_VIEW) && !defined(SQLITE_OMIT_TRIGGER) SQLITE_PRIVATE void sqlite3MaterializeView(Parse*, Table*, Expr*, ExprList*,Expr*,int); #endif @@ -21800,7 +22031,7 @@ SQLITE_PRIVATE void sqlite3Reindex(Parse*, Token*, Token*); SQLITE_PRIVATE void sqlite3AlterFunctions(void); SQLITE_PRIVATE void sqlite3AlterRenameTable(Parse*, SrcList*, Token*); SQLITE_PRIVATE void sqlite3AlterRenameColumn(Parse*, SrcList*, Token*, Token*); -SQLITE_PRIVATE int sqlite3GetToken(const unsigned char *, int *); +SQLITE_PRIVATE i64 sqlite3GetToken(const unsigned char *, int *); SQLITE_PRIVATE void sqlite3NestedParse(Parse*, const char*, ...); SQLITE_PRIVATE void sqlite3ExpirePreparedStatements(sqlite3*, int); SQLITE_PRIVATE void sqlite3CodeRhsOfIN(Parse*, Expr*, int); @@ -22566,6 +22797,9 @@ static const char * const sqlite3azCompileOpt[] = { #ifdef SQLITE_ENABLE_BYTECODE_VTAB "ENABLE_BYTECODE_VTAB", #endif +#ifdef SQLITE_ENABLE_CARRAY + "ENABLE_CARRAY", +#endif #ifdef SQLITE_ENABLE_CEROD "ENABLE_CEROD=" CTIMEOPT_VAL(SQLITE_ENABLE_CEROD), #endif @@ -22656,6 +22890,9 @@ static const char * const sqlite3azCompileOpt[] = { #ifdef SQLITE_ENABLE_OVERSIZE_CELL_CHECK "ENABLE_OVERSIZE_CELL_CHECK", #endif +#ifdef SQLITE_ENABLE_PERCENTILE + "ENABLE_PERCENTILE", +#endif #ifdef SQLITE_ENABLE_PREUPDATE_HOOK "ENABLE_PREUPDATE_HOOK", #endif @@ -23870,7 +24107,7 @@ struct sqlite3_value { ** MEM_Int, MEM_Real, and MEM_IntReal. ** ** * MEM_Blob|MEM_Zero A blob in Mem.z of length Mem.n plus -** MEM.u.i extra 0x00 bytes at the end. +** Mem.u.nZero extra 0x00 bytes at the end. ** ** * MEM_Int Integer stored in Mem.u.i. 
** @@ -24139,7 +24376,9 @@ struct PreUpdate { Table *pTab; /* Schema object being updated */ Index *pPk; /* PK index if pTab is WITHOUT ROWID */ sqlite3_value **apDflt; /* Array of default values, if required */ - u8 keyinfoSpace[SZ_KEYINFO(0)]; /* Space to hold pKeyinfo[0] content */ + struct { + u8 keyinfoSpace[SZ_KEYINFO_0]; /* Space to hold pKeyinfo[0] content */ + } uKey; }; /* @@ -24303,9 +24542,11 @@ SQLITE_PRIVATE int sqlite3VdbeCheckMemInvariants(Mem*); #endif #ifndef SQLITE_OMIT_FOREIGN_KEY -SQLITE_PRIVATE int sqlite3VdbeCheckFk(Vdbe *, int); +SQLITE_PRIVATE int sqlite3VdbeCheckFkImmediate(Vdbe*); +SQLITE_PRIVATE int sqlite3VdbeCheckFkDeferred(Vdbe*); #else -# define sqlite3VdbeCheckFk(p,i) 0 +# define sqlite3VdbeCheckFkImmediate(p) 0 +# define sqlite3VdbeCheckFkDeferred(p) 0 #endif #ifdef SQLITE_DEBUG @@ -24514,23 +24755,25 @@ SQLITE_PRIVATE int sqlite3LookasideUsed(sqlite3 *db, int *pHighwater){ /* ** Query status information for a single database connection */ -SQLITE_API int sqlite3_db_status( - sqlite3 *db, /* The database connection whose status is desired */ - int op, /* Status verb */ - int *pCurrent, /* Write current value here */ - int *pHighwater, /* Write high-water mark here */ - int resetFlag /* Reset high-water mark if true */ +SQLITE_API int sqlite3_db_status64( + sqlite3 *db, /* The database connection whose status is desired */ + int op, /* Status verb */ + sqlite3_int64 *pCurrent, /* Write current value here */ + sqlite3_int64 *pHighwtr, /* Write high-water mark here */ + int resetFlag /* Reset high-water mark if true */ ){ int rc = SQLITE_OK; /* Return code */ #ifdef SQLITE_ENABLE_API_ARMOR - if( !sqlite3SafetyCheckOk(db) || pCurrent==0|| pHighwater==0 ){ + if( !sqlite3SafetyCheckOk(db) || pCurrent==0|| pHighwtr==0 ){ return SQLITE_MISUSE_BKPT; } #endif sqlite3_mutex_enter(db->mutex); switch( op ){ case SQLITE_DBSTATUS_LOOKASIDE_USED: { - *pCurrent = sqlite3LookasideUsed(db, pHighwater); + int H = 0; + *pCurrent = 
sqlite3LookasideUsed(db, &H); + *pHighwtr = H; if( resetFlag ){ LookasideSlot *p = db->lookaside.pFree; if( p ){ @@ -24561,7 +24804,7 @@ SQLITE_API int sqlite3_db_status( assert( (op-SQLITE_DBSTATUS_LOOKASIDE_HIT)>=0 ); assert( (op-SQLITE_DBSTATUS_LOOKASIDE_HIT)<3 ); *pCurrent = 0; - *pHighwater = (int)db->lookaside.anStat[op-SQLITE_DBSTATUS_LOOKASIDE_HIT]; + *pHighwtr = db->lookaside.anStat[op-SQLITE_DBSTATUS_LOOKASIDE_HIT]; if( resetFlag ){ db->lookaside.anStat[op - SQLITE_DBSTATUS_LOOKASIDE_HIT] = 0; } @@ -24575,7 +24818,7 @@ SQLITE_API int sqlite3_db_status( */ case SQLITE_DBSTATUS_CACHE_USED_SHARED: case SQLITE_DBSTATUS_CACHE_USED: { - int totalUsed = 0; + sqlite3_int64 totalUsed = 0; int i; sqlite3BtreeEnterAll(db); for(i=0; inDb; i++){ @@ -24591,18 +24834,18 @@ SQLITE_API int sqlite3_db_status( } sqlite3BtreeLeaveAll(db); *pCurrent = totalUsed; - *pHighwater = 0; + *pHighwtr = 0; break; } /* ** *pCurrent gets an accurate estimate of the amount of memory used ** to store the schema for all databases (main, temp, and any ATTACHed - ** databases. *pHighwater is set to zero. + ** databases. *pHighwtr is set to zero. */ case SQLITE_DBSTATUS_SCHEMA_USED: { - int i; /* Used to iterate through schemas */ - int nByte = 0; /* Used to accumulate return value */ + int i; /* Used to iterate through schemas */ + int nByte = 0; /* Used to accumulate return value */ sqlite3BtreeEnterAll(db); db->pnBytesFreed = &nByte; @@ -24636,7 +24879,7 @@ SQLITE_API int sqlite3_db_status( db->lookaside.pEnd = db->lookaside.pTrueEnd; sqlite3BtreeLeaveAll(db); - *pHighwater = 0; + *pHighwtr = 0; *pCurrent = nByte; break; } @@ -24644,7 +24887,7 @@ SQLITE_API int sqlite3_db_status( /* ** *pCurrent gets an accurate estimate of the amount of memory used ** to store all prepared statements. - ** *pHighwater is set to zero. + ** *pHighwtr is set to zero. 
*/ case SQLITE_DBSTATUS_STMT_USED: { struct Vdbe *pVdbe; /* Used to iterate through VMs */ @@ -24659,7 +24902,7 @@ SQLITE_API int sqlite3_db_status( db->lookaside.pEnd = db->lookaside.pTrueEnd; db->pnBytesFreed = 0; - *pHighwater = 0; /* IMP: R-64479-57858 */ + *pHighwtr = 0; /* IMP: R-64479-57858 */ *pCurrent = nByte; break; @@ -24667,7 +24910,7 @@ SQLITE_API int sqlite3_db_status( /* ** Set *pCurrent to the total cache hits or misses encountered by all - ** pagers the database handle is connected to. *pHighwater is always set + ** pagers the database handle is connected to. *pHighwtr is always set ** to zero. */ case SQLITE_DBSTATUS_CACHE_SPILL: @@ -24687,19 +24930,39 @@ SQLITE_API int sqlite3_db_status( sqlite3PagerCacheStat(pPager, op, resetFlag, &nRet); } } - *pHighwater = 0; /* IMP: R-42420-56072 */ + *pHighwtr = 0; /* IMP: R-42420-56072 */ /* IMP: R-54100-20147 */ /* IMP: R-29431-39229 */ - *pCurrent = (int)nRet & 0x7fffffff; + *pCurrent = nRet; + break; + } + + /* Set *pCurrent to the number of bytes that the db database connection + ** has spilled to the filesystem in temporary files that could have been + ** stored in memory, had sufficient memory been available. + ** The *pHighwater is always set to zero. + */ + case SQLITE_DBSTATUS_TEMPBUF_SPILL: { + u64 nRet = 0; + if( db->aDb[1].pBt ){ + Pager *pPager = sqlite3BtreePager(db->aDb[1].pBt); + sqlite3PagerCacheStat(pPager, SQLITE_DBSTATUS_CACHE_WRITE, + resetFlag, &nRet); + nRet *= sqlite3BtreeGetPageSize(db->aDb[1].pBt); + } + nRet += db->nSpill; + if( resetFlag ) db->nSpill = 0; + *pHighwtr = 0; + *pCurrent = nRet; break; } /* Set *pCurrent to non-zero if there are unresolved deferred foreign ** key constraints. Set *pCurrent to zero if all foreign key constraints - ** have been satisfied. The *pHighwater is always set to zero. + ** have been satisfied. The *pHighwtr is always set to zero. 
*/ case SQLITE_DBSTATUS_DEFERRED_FKS: { - *pHighwater = 0; /* IMP: R-11967-56545 */ + *pHighwtr = 0; /* IMP: R-11967-56545 */ *pCurrent = db->nDeferredImmCons>0 || db->nDeferredCons>0; break; } @@ -24712,6 +24975,31 @@ SQLITE_API int sqlite3_db_status( return rc; } +/* +** 32-bit variant of sqlite3_db_status64() +*/ +SQLITE_API int sqlite3_db_status( + sqlite3 *db, /* The database connection whose status is desired */ + int op, /* Status verb */ + int *pCurrent, /* Write current value here */ + int *pHighwtr, /* Write high-water mark here */ + int resetFlag /* Reset high-water mark if true */ +){ + sqlite3_int64 C = 0, H = 0; + int rc; +#ifdef SQLITE_ENABLE_API_ARMOR + if( !sqlite3SafetyCheckOk(db) || pCurrent==0|| pHighwtr==0 ){ + return SQLITE_MISUSE_BKPT; + } +#endif + rc = sqlite3_db_status64(db, op, &C, &H, resetFlag); + if( rc==0 ){ + *pCurrent = C & 0x7fffffff; + *pHighwtr = H & 0x7fffffff; + } + return rc; +} + /************** End of status.c **********************************************/ /************** Begin file date.c ********************************************/ /* @@ -24904,6 +25192,10 @@ static int parseTimezone(const char *zDate, DateTime *p){ } zDate += 5; p->tz = sgn*(nMn + nHr*60); + if( p->tz==0 ){ /* Forum post 2025-09-17T10:12:14z */ + p->isLocal = 0; + p->isUtc = 1; + } zulu_time: while( sqlite3Isspace(*zDate) ){ zDate++; } return *zDate!=0; @@ -26099,8 +26391,8 @@ static int daysAfterSunday(DateTime *pDate){ ** %l hour 1-12 (leading zero converted to space) ** %m month 01-12 ** %M minute 00-59 -** %p "am" or "pm" -** %P "AM" or "PM" +** %p "AM" or "PM" +** %P "am" or "pm" ** %R time as HH:MM ** %s seconds since 1970-01-01 ** %S seconds 00-59 @@ -31707,6 +31999,7 @@ typedef struct et_info { /* Information about each format field */ etByte type; /* Conversion paradigm */ etByte charset; /* Offset into aDigits[] of the digits string */ etByte prefix; /* Offset into aPrefix[] of the prefix string */ + char iNxt; /* Next with same hash, or 0 for 
end of chain */ } et_info; /* @@ -31715,44 +32008,61 @@ typedef struct et_info { /* Information about each format field */ #define FLAG_SIGNED 1 /* True if the value to convert is signed */ #define FLAG_STRING 4 /* Allow infinite precision */ - /* -** The following table is searched linearly, so it is good to put the -** most frequently used conversion types first. +** The table is searched by hash. In the case of %C where C is the character +** and that character has ASCII value j, then the hash is j%23. +** +** The order of the entries in fmtinfo[] and the hash chain was entered +** manually, but based on the output of the following TCL script: */ +#if 0 /***** Beginning of script ******/ +foreach c {d s g z q Q w c o u x X f e E G i n % p T S r} { + scan $c %c x + set n($c) $x +} +set mx [llength [array names n]] +puts "count: $mx" + +set mx 27 +puts "*********** mx=$mx ************" +for {set r 0} {$r<$mx} {incr r} { + puts -nonewline [format %2d: $r] + foreach c [array names n] { + if {($n($c))%$mx==$r} {puts -nonewline " $c"} + } + puts "" +} +#endif /***** End of script ********/ + static const char aDigits[] = "0123456789ABCDEF0123456789abcdef"; static const char aPrefix[] = "-x0\000X0"; -static const et_info fmtinfo[] = { - { 'd', 10, 1, etDECIMAL, 0, 0 }, - { 's', 0, 4, etSTRING, 0, 0 }, - { 'g', 0, 1, etGENERIC, 30, 0 }, - { 'z', 0, 4, etDYNSTRING, 0, 0 }, - { 'q', 0, 4, etESCAPE_q, 0, 0 }, - { 'Q', 0, 4, etESCAPE_Q, 0, 0 }, - { 'w', 0, 4, etESCAPE_w, 0, 0 }, - { 'c', 0, 0, etCHARX, 0, 0 }, - { 'o', 8, 0, etRADIX, 0, 2 }, - { 'u', 10, 0, etDECIMAL, 0, 0 }, - { 'x', 16, 0, etRADIX, 16, 1 }, - { 'X', 16, 0, etRADIX, 0, 4 }, -#ifndef SQLITE_OMIT_FLOATING_POINT - { 'f', 0, 1, etFLOAT, 0, 0 }, - { 'e', 0, 1, etEXP, 30, 0 }, - { 'E', 0, 1, etEXP, 14, 0 }, - { 'G', 0, 1, etGENERIC, 14, 0 }, -#endif - { 'i', 10, 1, etDECIMAL, 0, 0 }, - { 'n', 0, 0, etSIZE, 0, 0 }, - { '%', 0, 0, etPERCENT, 0, 0 }, - { 'p', 16, 0, etPOINTER, 0, 1 }, - - /* All the rest are 
undocumented and are for internal use only */ - { 'T', 0, 0, etTOKEN, 0, 0 }, - { 'S', 0, 0, etSRCITEM, 0, 0 }, - { 'r', 10, 1, etORDINAL, 0, 0 }, +static const et_info fmtinfo[23] = { + /* 0 */ { 's', 0, 4, etSTRING, 0, 0, 1 }, + /* 1 */ { 'E', 0, 1, etEXP, 14, 0, 0 }, /* Hash: 0 */ + /* 2 */ { 'u', 10, 0, etDECIMAL, 0, 0, 3 }, + /* 3 */ { 'G', 0, 1, etGENERIC, 14, 0, 0 }, /* Hash: 2 */ + /* 4 */ { 'w', 0, 4, etESCAPE_w, 0, 0, 0 }, + /* 5 */ { 'x', 16, 0, etRADIX, 16, 1, 0 }, + /* 6 */ { 'c', 0, 0, etCHARX, 0, 0, 0 }, /* Hash: 7 */ + /* 7 */ { 'z', 0, 4, etDYNSTRING, 0, 0, 6 }, + /* 8 */ { 'd', 10, 1, etDECIMAL, 0, 0, 0 }, + /* 9 */ { 'e', 0, 1, etEXP, 30, 0, 0 }, + /* 10 */ { 'f', 0, 1, etFLOAT, 0, 0, 0 }, + /* 11 */ { 'g', 0, 1, etGENERIC, 30, 0, 0 }, + /* 12 */ { 'Q', 0, 4, etESCAPE_Q, 0, 0, 0 }, + /* 13 */ { 'i', 10, 1, etDECIMAL, 0, 0, 0 }, + /* 14 */ { '%', 0, 0, etPERCENT, 0, 0, 16 }, + /* 15 */ { 'T', 0, 0, etTOKEN, 0, 0, 0 }, + /* 16 */ { 'S', 0, 0, etSRCITEM, 0, 0, 0 }, /* Hash: 14 */ + /* 17 */ { 'X', 16, 0, etRADIX, 0, 4, 0 }, /* Hash: 19 */ + /* 18 */ { 'n', 0, 0, etSIZE, 0, 0, 0 }, + /* 19 */ { 'o', 8, 0, etRADIX, 0, 2, 17 }, + /* 20 */ { 'p', 16, 0, etPOINTER, 0, 1, 0 }, + /* 21 */ { 'q', 0, 4, etESCAPE_q, 0, 0, 0 }, + /* 22 */ { 'r', 10, 1, etORDINAL, 0, 0, 0 } }; -/* Notes: +/* Additional Notes: ** ** %S Takes a pointer to SrcItem. Shows name or database.name ** %!S Like %S but prefer the zName over the zAlias @@ -31879,7 +32189,10 @@ SQLITE_API void sqlite3_str_vappendf( #if HAVE_STRCHRNUL fmt = strchrnul(fmt, '%'); #else - do{ fmt++; }while( *fmt && *fmt != '%' ); + fmt = strchr(fmt, '%'); + if( fmt==0 ){ + fmt = bufpt + strlen(bufpt); + } #endif sqlite3_str_append(pAccum, bufpt, (int)(fmt - bufpt)); if( *fmt==0 ) break; @@ -31993,6 +32306,9 @@ SQLITE_API void sqlite3_str_vappendf( }while( !done && (c=(*++fmt))!=0 ); /* Fetch the info entry for the field */ +#ifdef SQLITE_EBCDIC + /* The hash table only works for ASCII. 
For EBCDIC, we need to do + ** a linear search of the table */ infop = &fmtinfo[0]; xtype = etINVALID; for(idx=0; idxtype; + }else{ + infop = &fmtinfo[0]; + xtype = etINVALID; + } +#endif /* ** At this point, variables are initialized as follows: @@ -32069,6 +32399,14 @@ SQLITE_API void sqlite3_str_vappendf( } prefix = 0; } + +#if WHERETRACE_ENABLED + if( xtype==etPOINTER && sqlite3WhereTrace & 0x100000 ) longvalue = 0; +#endif +#if TREETRACE_ENABLED + if( xtype==etPOINTER && sqlite3TreeTrace & 0x100000 ) longvalue = 0; +#endif + if( longvalue==0 ) flag_alternateform = 0; if( flag_zeropad && precisionfg.isSubquery ) n++; if( pItem->fg.isTabFunc ) n++; - if( pItem->fg.isUsing ) n++; + if( pItem->fg.isUsing || pItem->u3.pOn!=0 ) n++; if( pItem->fg.isUsing ){ sqlite3TreeViewIdList(pView, pItem->u3.pUsing, (--n)>0, "USING"); + }else if( pItem->u3.pOn!=0 ){ + sqlite3TreeViewItem(pView, "ON", (--n)>0); + sqlite3TreeViewExpr(pView, pItem->u3.pOn, 0); + sqlite3TreeViewPop(&pView); } if( pItem->fg.isSubquery ){ assert( n==1 ); @@ -37649,6 +38005,7 @@ SQLITE_PRIVATE void *sqlite3HashInsert(Hash *pH, const char *pKey, void *data){ return 0; } + /************** End of hash.c ************************************************/ /************** Begin file opcodes.c *****************************************/ /* Automatically generated. 
Do not edit */ @@ -37700,20 +38057,20 @@ SQLITE_PRIVATE const char *sqlite3OpcodeName(int i){ /* 34 */ "SorterSort" OpHelp(""), /* 35 */ "Sort" OpHelp(""), /* 36 */ "Rewind" OpHelp(""), - /* 37 */ "SorterNext" OpHelp(""), - /* 38 */ "Prev" OpHelp(""), - /* 39 */ "Next" OpHelp(""), - /* 40 */ "IdxLE" OpHelp("key=r[P3@P4]"), - /* 41 */ "IdxGT" OpHelp("key=r[P3@P4]"), - /* 42 */ "IdxLT" OpHelp("key=r[P3@P4]"), + /* 37 */ "IfEmpty" OpHelp("if( empty(P1) ) goto P2"), + /* 38 */ "SorterNext" OpHelp(""), + /* 39 */ "Prev" OpHelp(""), + /* 40 */ "Next" OpHelp(""), + /* 41 */ "IdxLE" OpHelp("key=r[P3@P4]"), + /* 42 */ "IdxGT" OpHelp("key=r[P3@P4]"), /* 43 */ "Or" OpHelp("r[P3]=(r[P1] || r[P2])"), /* 44 */ "And" OpHelp("r[P3]=(r[P1] && r[P2])"), - /* 45 */ "IdxGE" OpHelp("key=r[P3@P4]"), - /* 46 */ "RowSetRead" OpHelp("r[P3]=rowset(P1)"), - /* 47 */ "RowSetTest" OpHelp("if r[P3] in rowset(P1) goto P2"), - /* 48 */ "Program" OpHelp(""), - /* 49 */ "FkIfZero" OpHelp("if fkctr[P1]==0 goto P2"), - /* 50 */ "IfPos" OpHelp("if r[P1]>0 then r[P1]-=P3, goto P2"), + /* 45 */ "IdxLT" OpHelp("key=r[P3@P4]"), + /* 46 */ "IdxGE" OpHelp("key=r[P3@P4]"), + /* 47 */ "RowSetRead" OpHelp("r[P3]=rowset(P1)"), + /* 48 */ "RowSetTest" OpHelp("if r[P3] in rowset(P1) goto P2"), + /* 49 */ "Program" OpHelp(""), + /* 50 */ "FkIfZero" OpHelp("if fkctr[P1]==0 goto P2"), /* 51 */ "IsNull" OpHelp("if r[P1]==NULL goto P2"), /* 52 */ "NotNull" OpHelp("if r[P1]!=NULL goto P2"), /* 53 */ "Ne" OpHelp("IF r[P3]!=r[P1]"), @@ -37723,49 +38080,49 @@ SQLITE_PRIVATE const char *sqlite3OpcodeName(int i){ /* 57 */ "Lt" OpHelp("IF r[P3]=r[P1]"), /* 59 */ "ElseEq" OpHelp(""), - /* 60 */ "IfNotZero" OpHelp("if r[P1]!=0 then r[P1]--, goto P2"), - /* 61 */ "DecrJumpZero" OpHelp("if (--r[P1])==0 goto P2"), - /* 62 */ "IncrVacuum" OpHelp(""), - /* 63 */ "VNext" OpHelp(""), - /* 64 */ "Filter" OpHelp("if key(P3@P4) not in filter(P1) goto P2"), - /* 65 */ "PureFunc" OpHelp("r[P3]=func(r[P2@NP])"), - /* 66 */ "Function" 
OpHelp("r[P3]=func(r[P2@NP])"), - /* 67 */ "Return" OpHelp(""), - /* 68 */ "EndCoroutine" OpHelp(""), - /* 69 */ "HaltIfNull" OpHelp("if r[P3]=null halt"), - /* 70 */ "Halt" OpHelp(""), - /* 71 */ "Integer" OpHelp("r[P2]=P1"), - /* 72 */ "Int64" OpHelp("r[P2]=P4"), - /* 73 */ "String" OpHelp("r[P2]='P4' (len=P1)"), - /* 74 */ "BeginSubrtn" OpHelp("r[P2]=NULL"), - /* 75 */ "Null" OpHelp("r[P2..P3]=NULL"), - /* 76 */ "SoftNull" OpHelp("r[P1]=NULL"), - /* 77 */ "Blob" OpHelp("r[P2]=P4 (len=P1)"), - /* 78 */ "Variable" OpHelp("r[P2]=parameter(P1)"), - /* 79 */ "Move" OpHelp("r[P2@P3]=r[P1@P3]"), - /* 80 */ "Copy" OpHelp("r[P2@P3+1]=r[P1@P3+1]"), - /* 81 */ "SCopy" OpHelp("r[P2]=r[P1]"), - /* 82 */ "IntCopy" OpHelp("r[P2]=r[P1]"), - /* 83 */ "FkCheck" OpHelp(""), - /* 84 */ "ResultRow" OpHelp("output=r[P1@P2]"), - /* 85 */ "CollSeq" OpHelp(""), - /* 86 */ "AddImm" OpHelp("r[P1]=r[P1]+P2"), - /* 87 */ "RealAffinity" OpHelp(""), - /* 88 */ "Cast" OpHelp("affinity(r[P1])"), - /* 89 */ "Permutation" OpHelp(""), - /* 90 */ "Compare" OpHelp("r[P1@P3] <-> r[P2@P3]"), - /* 91 */ "IsTrue" OpHelp("r[P2] = coalesce(r[P1]==TRUE,P3) ^ P4"), - /* 92 */ "ZeroOrNull" OpHelp("r[P2] = 0 OR NULL"), - /* 93 */ "Offset" OpHelp("r[P3] = sqlite_offset(P1)"), - /* 94 */ "Column" OpHelp("r[P3]=PX cursor P1 column P2"), - /* 95 */ "TypeCheck" OpHelp("typecheck(r[P1@P2])"), - /* 96 */ "Affinity" OpHelp("affinity(r[P1@P2])"), - /* 97 */ "MakeRecord" OpHelp("r[P3]=mkrec(r[P1@P2])"), - /* 98 */ "Count" OpHelp("r[P2]=count()"), - /* 99 */ "ReadCookie" OpHelp(""), - /* 100 */ "SetCookie" OpHelp(""), - /* 101 */ "ReopenIdx" OpHelp("root=P2 iDb=P3"), - /* 102 */ "OpenRead" OpHelp("root=P2 iDb=P3"), + /* 60 */ "IfPos" OpHelp("if r[P1]>0 then r[P1]-=P3, goto P2"), + /* 61 */ "IfNotZero" OpHelp("if r[P1]!=0 then r[P1]--, goto P2"), + /* 62 */ "DecrJumpZero" OpHelp("if (--r[P1])==0 goto P2"), + /* 63 */ "IncrVacuum" OpHelp(""), + /* 64 */ "VNext" OpHelp(""), + /* 65 */ "Filter" OpHelp("if key(P3@P4) not in 
filter(P1) goto P2"), + /* 66 */ "PureFunc" OpHelp("r[P3]=func(r[P2@NP])"), + /* 67 */ "Function" OpHelp("r[P3]=func(r[P2@NP])"), + /* 68 */ "Return" OpHelp(""), + /* 69 */ "EndCoroutine" OpHelp(""), + /* 70 */ "HaltIfNull" OpHelp("if r[P3]=null halt"), + /* 71 */ "Halt" OpHelp(""), + /* 72 */ "Integer" OpHelp("r[P2]=P1"), + /* 73 */ "Int64" OpHelp("r[P2]=P4"), + /* 74 */ "String" OpHelp("r[P2]='P4' (len=P1)"), + /* 75 */ "BeginSubrtn" OpHelp("r[P2]=NULL"), + /* 76 */ "Null" OpHelp("r[P2..P3]=NULL"), + /* 77 */ "SoftNull" OpHelp("r[P1]=NULL"), + /* 78 */ "Blob" OpHelp("r[P2]=P4 (len=P1)"), + /* 79 */ "Variable" OpHelp("r[P2]=parameter(P1)"), + /* 80 */ "Move" OpHelp("r[P2@P3]=r[P1@P3]"), + /* 81 */ "Copy" OpHelp("r[P2@P3+1]=r[P1@P3+1]"), + /* 82 */ "SCopy" OpHelp("r[P2]=r[P1]"), + /* 83 */ "IntCopy" OpHelp("r[P2]=r[P1]"), + /* 84 */ "FkCheck" OpHelp(""), + /* 85 */ "ResultRow" OpHelp("output=r[P1@P2]"), + /* 86 */ "CollSeq" OpHelp(""), + /* 87 */ "AddImm" OpHelp("r[P1]=r[P1]+P2"), + /* 88 */ "RealAffinity" OpHelp(""), + /* 89 */ "Cast" OpHelp("affinity(r[P1])"), + /* 90 */ "Permutation" OpHelp(""), + /* 91 */ "Compare" OpHelp("r[P1@P3] <-> r[P2@P3]"), + /* 92 */ "IsTrue" OpHelp("r[P2] = coalesce(r[P1]==TRUE,P3) ^ P4"), + /* 93 */ "ZeroOrNull" OpHelp("r[P2] = 0 OR NULL"), + /* 94 */ "Offset" OpHelp("r[P3] = sqlite_offset(P1)"), + /* 95 */ "Column" OpHelp("r[P3]=PX cursor P1 column P2"), + /* 96 */ "TypeCheck" OpHelp("typecheck(r[P1@P2])"), + /* 97 */ "Affinity" OpHelp("affinity(r[P1@P2])"), + /* 98 */ "MakeRecord" OpHelp("r[P3]=mkrec(r[P1@P2])"), + /* 99 */ "Count" OpHelp("r[P2]=count()"), + /* 100 */ "ReadCookie" OpHelp(""), + /* 101 */ "SetCookie" OpHelp(""), + /* 102 */ "ReopenIdx" OpHelp("root=P2 iDb=P3"), /* 103 */ "BitAnd" OpHelp("r[P3]=r[P1]&r[P2]"), /* 104 */ "BitOr" OpHelp("r[P3]=r[P1]|r[P2]"), /* 105 */ "ShiftLeft" OpHelp("r[P3]=r[P2]<0 then r[P2]=r[P1]+max(0,r[P3]) else r[P2]=(-1)"), - /* 161 */ "AggInverse" OpHelp("accum=r[P3] inverse(r[P2@P5])"), - /* 
162 */ "AggStep" OpHelp("accum=r[P3] step(r[P2@P5])"), - /* 163 */ "AggStep1" OpHelp("accum=r[P3] step(r[P2@P5])"), - /* 164 */ "AggValue" OpHelp("r[P3]=value N=P2"), - /* 165 */ "AggFinal" OpHelp("accum=r[P1] N=P2"), - /* 166 */ "Expire" OpHelp(""), - /* 167 */ "CursorLock" OpHelp(""), - /* 168 */ "CursorUnlock" OpHelp(""), - /* 169 */ "TableLock" OpHelp("iDb=P1 root=P2 write=P3"), - /* 170 */ "VBegin" OpHelp(""), - /* 171 */ "VCreate" OpHelp(""), - /* 172 */ "VDestroy" OpHelp(""), - /* 173 */ "VOpen" OpHelp(""), - /* 174 */ "VCheck" OpHelp(""), - /* 175 */ "VInitIn" OpHelp("r[P2]=ValueList(P1,P3)"), - /* 176 */ "VColumn" OpHelp("r[P3]=vcolumn(P2)"), - /* 177 */ "VRename" OpHelp(""), - /* 178 */ "Pagecount" OpHelp(""), - /* 179 */ "MaxPgcnt" OpHelp(""), - /* 180 */ "ClrSubtype" OpHelp("r[P1].subtype = 0"), - /* 181 */ "GetSubtype" OpHelp("r[P2] = r[P1].subtype"), - /* 182 */ "SetSubtype" OpHelp("r[P2].subtype = r[P1]"), - /* 183 */ "FilterAdd" OpHelp("filter(P1) += key(P3@P4)"), - /* 184 */ "Trace" OpHelp(""), - /* 185 */ "CursorHint" OpHelp(""), - /* 186 */ "ReleaseReg" OpHelp("release r[P1@P2] mask P3"), - /* 187 */ "Noop" OpHelp(""), - /* 188 */ "Explain" OpHelp(""), - /* 189 */ "Abortable" OpHelp(""), + /* 155 */ "DropTrigger" OpHelp(""), + /* 156 */ "IntegrityCk" OpHelp(""), + /* 157 */ "RowSetAdd" OpHelp("rowset(P1)=r[P2]"), + /* 158 */ "Param" OpHelp(""), + /* 159 */ "FkCounter" OpHelp("fkctr[P1]+=P2"), + /* 160 */ "MemMax" OpHelp("r[P1]=max(r[P1],r[P2])"), + /* 161 */ "OffsetLimit" OpHelp("if r[P1]>0 then r[P2]=r[P1]+max(0,r[P3]) else r[P2]=(-1)"), + /* 162 */ "AggInverse" OpHelp("accum=r[P3] inverse(r[P2@P5])"), + /* 163 */ "AggStep" OpHelp("accum=r[P3] step(r[P2@P5])"), + /* 164 */ "AggStep1" OpHelp("accum=r[P3] step(r[P2@P5])"), + /* 165 */ "AggValue" OpHelp("r[P3]=value N=P2"), + /* 166 */ "AggFinal" OpHelp("accum=r[P1] N=P2"), + /* 167 */ "Expire" OpHelp(""), + /* 168 */ "CursorLock" OpHelp(""), + /* 169 */ "CursorUnlock" OpHelp(""), + /* 170 */ 
"TableLock" OpHelp("iDb=P1 root=P2 write=P3"), + /* 171 */ "VBegin" OpHelp(""), + /* 172 */ "VCreate" OpHelp(""), + /* 173 */ "VDestroy" OpHelp(""), + /* 174 */ "VOpen" OpHelp(""), + /* 175 */ "VCheck" OpHelp(""), + /* 176 */ "VInitIn" OpHelp("r[P2]=ValueList(P1,P3)"), + /* 177 */ "VColumn" OpHelp("r[P3]=vcolumn(P2)"), + /* 178 */ "VRename" OpHelp(""), + /* 179 */ "Pagecount" OpHelp(""), + /* 180 */ "MaxPgcnt" OpHelp(""), + /* 181 */ "ClrSubtype" OpHelp("r[P1].subtype = 0"), + /* 182 */ "GetSubtype" OpHelp("r[P2] = r[P1].subtype"), + /* 183 */ "SetSubtype" OpHelp("r[P2].subtype = r[P1]"), + /* 184 */ "FilterAdd" OpHelp("filter(P1) += key(P3@P4)"), + /* 185 */ "Trace" OpHelp(""), + /* 186 */ "CursorHint" OpHelp(""), + /* 187 */ "ReleaseReg" OpHelp("release r[P1@P2] mask P3"), + /* 188 */ "Noop" OpHelp(""), + /* 189 */ "Explain" OpHelp(""), + /* 190 */ "Abortable" OpHelp(""), }; return azName[i]; } @@ -38036,7 +38394,7 @@ static int kvstorageRead(const char*, const char *zKey, char *zBuf, int nBuf); #define KVSTORAGE_KEY_SZ 32 /* Expand the key name with an appropriate prefix and put the result -** zKeyOut[]. The zKeyOut[] buffer is assumed to hold at least +** in zKeyOut[]. The zKeyOut[] buffer is assumed to hold at least ** KVSTORAGE_KEY_SZ bytes. */ static void kvstorageMakeKey( @@ -38095,10 +38453,12 @@ static int kvstorageDelete(const char *zClass, const char *zKey){ ** ** Return the total number of bytes in the data, without truncation, and ** not counting the final zero terminator. Return -1 if the key does -** not exist. +** not exist or its key cannot be read. ** -** If nBuf<=0 then this routine simply returns the size of the data without -** actually reading it. +** If nBuf<=0 then this routine simply returns the size of the data +** without actually reading it. Similarly, if nBuf==1 then it +** zero-terminates zBuf at zBuf[0] and returns the size of the data +** without reading it. 
*/ static int kvstorageRead( const char *zClass, @@ -38147,11 +38507,9 @@ static int kvstorageRead( ** kvvfs i/o methods with JavaScript implementations in WASM builds. ** Maintenance reminder: if this struct changes in any way, the JSON ** rendering of its structure must be updated in -** sqlite3_wasm_enum_json(). There are no binary compatibility -** concerns, so it does not need an iVersion member. This file is -** necessarily always compiled together with sqlite3_wasm_enum_json(), -** and JS code dynamically creates the mapping of members based on -** that JSON description. +** sqlite3-wasm.c:sqlite3__wasm_enum_json(). There are no binary +** compatibility concerns, so it does not need an iVersion +** member. */ typedef struct sqlite3_kvvfs_methods sqlite3_kvvfs_methods; struct sqlite3_kvvfs_methods { @@ -38168,8 +38526,8 @@ struct sqlite3_kvvfs_methods { ** the compiler can hopefully optimize this level of indirection out. ** That said, kvvfs is intended primarily for use in WASM builds. ** -** Note that this is not explicitly flagged as static because the -** amalgamation build will tag it with SQLITE_PRIVATE. +** This is not explicitly flagged as static because the amalgamation +** build will tag it with SQLITE_PRIVATE. 
*/ #ifndef SQLITE_WASM const @@ -39342,10 +39700,11 @@ static struct unix_syscall { #if defined(HAVE_FCHMOD) { "fchmod", (sqlite3_syscall_ptr)fchmod, 0 }, +#define osFchmod ((int(*)(int,mode_t))aSyscall[14].pCurrent) #else { "fchmod", (sqlite3_syscall_ptr)0, 0 }, +#define osFchmod(FID,MODE) 0 #endif -#define osFchmod ((int(*)(int,mode_t))aSyscall[14].pCurrent) #if defined(HAVE_POSIX_FALLOCATE) && HAVE_POSIX_FALLOCATE { "fallocate", (sqlite3_syscall_ptr)posix_fallocate, 0 }, @@ -39439,6 +39798,119 @@ static struct unix_syscall { }; /* End of the overrideable system calls */ +#if defined(SQLITE_DEBUG) || defined(SQLITE_ENABLE_FILESTAT) +/* +** Extract Posix Advisory Locking information about file description fd +** from the /proc/PID/fdinfo/FD pseudo-file. Fill the string buffer a[16] +** with characters to indicate which SQLite-relevant locks are held. +** a[16] will be a 15-character zero-terminated string with the following +** schema: +** +** AAA/B.DDD.DDDDD +** +** Each of character A-D will be "w" or "r" or "-" to indicate either a +** write-lock, a read-lock, or no-lock, respectively. The "." and "/" +** characters are delimiters intended to make the string more easily +** readable by humans. Here are the meaning of the specific letters: +** +** AAA -> The main database locks. PENDING_BYTE, RESERVED_BYTE, +** and SHARED_FIRST, respectively. +** +** B -> The deadman switch lock. Offset 128 of the -shm file. +** +** CCC -> WAL locks: WRITE, CKPT, RECOVER +** +** DDDDD -> WAL read-locks 0 through 5 +** +** Note that elements before the "/" apply to the main database file and +** elements after the "/" apply to the -shm file in WAL mode. 
+** +** Here is another way of thinking about the meaning of the result string: +** +** AAA/B.CCC.DDDDD +** ||| | ||| \___/ +** PENDING--'|| | ||| `----- READ 0-5 +** RESERVED--'| | ||`---- RECOVER +** SHARED ----' | |`----- CKPT +** DMS ------' `------ WRITE +** +** Return SQLITE_OK on success and SQLITE_ERROR_UNABLE if the /proc +** pseudo-filesystem is unavailable. +*/ +static int unixPosixAdvisoryLocks( + int fd, /* The file descriptor to analyze */ + char a[16] /* Write a text description of PALs here */ +){ + int in; + ssize_t n; + char *p, *pNext, *x; + char z[2000]; + + /* 1 */ + /* 012 4 678 01234 */ + memcpy(a, "---/-.---.-----", 16); + sqlite3_snprintf(sizeof(z), z, "/proc/%d/fdinfo/%d", getpid(), fd); + in = osOpen(z, O_RDONLY, 0); + if( in<0 ){ + return SQLITE_ERROR_UNABLE; + } + n = osRead(in, z, sizeof(z)-1); + osClose(in); + if( n<=0 ) return SQLITE_ERROR_UNABLE; + z[n] = 0; + + /* We are looking for lines that begin with "lock:\t". Examples: + ** + ** lock: 1: POSIX ADVISORY READ 494716 08:02:5277597 1073741826 1073742335 + ** lock: 1: POSIX ADVISORY WRITE 494716 08:02:5282282 120 120 + ** lock: 2: POSIX ADVISORY READ 494716 08:02:5282282 123 123 + ** lock: 3: POSIX ADVISORY READ 494716 08:02:5282282 128 128 + */ + pNext = strstr(z, "lock:\t"); + while( pNext ){ + char cType = 0; + sqlite3_int64 iFirst, iLast; + p = pNext+6; + pNext = strstr(p, "lock:\t"); + if( pNext ) pNext[-1] = 0; + if( (x = strstr(p, " READ "))!=0 ){ + cType = 'r'; + x += 6; + }else if( (x = strstr(p, " WRITE "))!=0 ){ + cType = 'w'; + x += 7; + }else{ + continue; + } + x = strrchr(x, ' '); + if( x==0 ) continue; + iLast = strtoll(x+1, 0, 10); + *x = 0; + x = strrchr(p, ' '); + if( x==0 ) continue; + iFirst = strtoll(x+1, 0, 10); + if( iLast>=PENDING_BYTE ){ + if( iFirst<=PENDING_BYTE && iLast>=PENDING_BYTE ) a[0] = cType; + if( iFirst<=PENDING_BYTE+1 && iLast>=PENDING_BYTE+1 ) a[1] = cType; + if( iFirst<=PENDING_BYTE+2 && iLast>=PENDING_BYTE+510 ) a[2] = cType; + }else if( 
iLast<=128 ){ + if( iFirst<=128 && iLast>=128 ) a[4] = cType; + if( iFirst<=120 && iLast>=120 ) a[6] = cType; + if( iFirst<=121 && iLast>=121 ) a[7] = cType; + if( iFirst<=122 && iLast>=122 ) a[8] = cType; + if( iFirst<=123 && iLast>=123 ) a[10] = cType; + if( iFirst<=124 && iLast>=124 ) a[11] = cType; + if( iFirst<=125 && iLast>=125 ) a[12] = cType; + if( iFirst<=126 && iLast>=126 ) a[13] = cType; + if( iFirst<=127 && iLast>=127 ) a[14] = cType; + } + } + return SQLITE_OK; +} +#else +# define unixPosixAdvisoryLocks(A,B) SQLITE_ERROR_UNABLE +#endif /* SQLITE_DEBUG || SQLITE_ENABLE_FILESTAT */ + /* ** On some systems, calls to fchown() will trigger a message in a security ** log if they come from non-root processes. So avoid calling fchown() if @@ -39603,9 +40075,8 @@ static int robust_open(const char *z, int f, mode_t m){ /* ** Helper functions to obtain and relinquish the global mutex. The -** global mutex is used to protect the unixInodeInfo and -** vxworksFileId objects used by this file, all of which may be -** shared by multiple threads. +** global mutex is used to protect the unixInodeInfo objects used by +** this file, all of which may be shared by multiple threads. ** ** Function unixMutexHeld() is used to assert() that the global mutex ** is held when required. This function is only used as part of assert() @@ -39807,6 +40278,7 @@ struct vxworksFileId { ** variable: */ static struct vxworksFileId *vxworksFileList = 0; +static sqlite3_mutex *vxworksMutex = 0; /* ** Simplify a filename into its canonical form @@ -39872,14 +40344,14 @@ static struct vxworksFileId *vxworksFindFileId(const char *zAbsoluteName){ ** If found, increment the reference count and return a pointer to ** the existing file ID. 
*/ - unixEnterMutex(); + sqlite3_mutex_enter(vxworksMutex); for(pCandidate=vxworksFileList; pCandidate; pCandidate=pCandidate->pNext){ if( pCandidate->nName==n && memcmp(pCandidate->zCanonicalName, pNew->zCanonicalName, n)==0 ){ sqlite3_free(pNew); pCandidate->nRef++; - unixLeaveMutex(); + sqlite3_mutex_leave(vxworksMutex); return pCandidate; } } @@ -39889,7 +40361,7 @@ static struct vxworksFileId *vxworksFindFileId(const char *zAbsoluteName){ pNew->nName = n; pNew->pNext = vxworksFileList; vxworksFileList = pNew; - unixLeaveMutex(); + sqlite3_mutex_leave(vxworksMutex); return pNew; } @@ -39898,7 +40370,7 @@ static struct vxworksFileId *vxworksFindFileId(const char *zAbsoluteName){ ** the object when the reference count reaches zero. */ static void vxworksReleaseFileId(struct vxworksFileId *pId){ - unixEnterMutex(); + sqlite3_mutex_enter(vxworksMutex); assert( pId->nRef>0 ); pId->nRef--; if( pId->nRef==0 ){ @@ -39908,7 +40380,7 @@ static void vxworksReleaseFileId(struct vxworksFileId *pId){ *pp = pId->pNext; sqlite3_free(pId); } - unixLeaveMutex(); + sqlite3_mutex_leave(vxworksMutex); } #endif /* OS_VXWORKS */ /*************** End of Unique File ID Utility Used By VxWorks **************** @@ -40296,6 +40768,10 @@ static int findInodeInfo( storeLastErrno(pFile, errno); return SQLITE_IOERR; } + if( fsync(fd) ){ + storeLastErrno(pFile, errno); + return SQLITE_IOERR_FSYNC; + } rc = osFstat(fd, &statbuf); if( rc!=0 ){ storeLastErrno(pFile, errno); @@ -40465,18 +40941,42 @@ static int osSetPosixAdvisoryLock( struct flock *pLock, /* The description of the lock */ unixFile *pFile /* Structure holding timeout value */ ){ - int tm = pFile->iBusyTimeout; - int rc = osFcntl(h,F_SETLK,pLock); - while( rc<0 && tm>0 ){ - /* On systems that support some kind of blocking file lock with a timeout, - ** make appropriate changes here to invoke that blocking file lock. On - ** generic posix, however, there is no such API. 
So we simply try the - ** lock once every millisecond until either the timeout expires, or until - ** the lock is obtained. */ - unixSleep(0,1000); + int rc = 0; + + if( pFile->iBusyTimeout==0 ){ + /* unixFile->iBusyTimeout is set to 0. In this case, attempt a + ** non-blocking lock. */ rc = osFcntl(h,F_SETLK,pLock); - tm--; + }else{ + /* unixFile->iBusyTimeout is set to greater than zero. In this case, + ** attempt a blocking-lock with a unixFile->iBusyTimeout ms timeout. + ** + ** On systems that support some kind of blocking file lock operation, + ** this block should be replaced by code to attempt a blocking lock + ** with a timeout of unixFile->iBusyTimeout ms. The code below is + ** placeholder code. If SQLITE_TEST is defined, the placeholder code + ** retries the lock once every 1ms until it succeeds or the timeout + ** is reached. Or, if SQLITE_TEST is not defined, the placeholder + ** code attempts a non-blocking lock and sets unixFile->iBusyTimeout + ** to 0. This causes the caller to return SQLITE_BUSY, instead of + ** SQLITE_BUSY_TIMEOUT to SQLite - as required by a VFS that does not + ** support blocking locks. + */ +#ifdef SQLITE_TEST + int tm = pFile->iBusyTimeout; + while( tm>0 ){ + rc = osFcntl(h,F_SETLK,pLock); + if( rc==0 ) break; + unixSleep(0,1000); + tm--; + } +#else + rc = osFcntl(h,F_SETLK,pLock); + pFile->iBusyTimeout = 0; +#endif + /* End of code to replace with real blocking-locks code. 
*/ } + return rc; } #endif /* SQLITE_ENABLE_SETLK_TIMEOUT */ @@ -40534,6 +41034,13 @@ static int unixFileLock(unixFile *pFile, struct flock *pLock){ return rc; } +#if !defined(SQLITE_WASI) && !defined(SQLITE_OMIT_WAL) +/* Forward reference */ +static int unixIsSharingShmNode(unixFile*); +#else +#define unixIsSharingShmNode(pFile) (0) +#endif + /* ** Lock the file with the lock specified by parameter eFileLock - one ** of the following: @@ -40722,7 +41229,9 @@ static int unixLock(sqlite3_file *id, int eFileLock){ pInode->nLock++; pInode->nShared = 1; } - }else if( eFileLock==EXCLUSIVE_LOCK && pInode->nShared>1 ){ + }else if( (eFileLock==EXCLUSIVE_LOCK && pInode->nShared>1) + || unixIsSharingShmNode(pFile) + ){ /* We are trying for an exclusive lock but another thread in this ** same process is still holding a shared lock. */ rc = SQLITE_BUSY; @@ -42817,6 +43326,10 @@ static int unixGetTempname(int nBuf, char *zBuf); #if !defined(SQLITE_WASI) && !defined(SQLITE_OMIT_WAL) static int unixFcntlExternalReader(unixFile*, int*); #endif +#if defined(SQLITE_DEBUG) || defined(SQLITE_ENABLE_FILESTAT) + static void unixDescribeShm(sqlite3_str*,unixShm*); +#endif + /* ** Information and control of an open file handle. 
@@ -42959,6 +43472,66 @@ static int unixFileControl(sqlite3_file *id, int op, void *pArg){ return SQLITE_OK; #endif } + +#if defined(SQLITE_DEBUG) || defined(SQLITE_ENABLE_FILESTAT) + case SQLITE_FCNTL_FILESTAT: { + sqlite3_str *pStr = (sqlite3_str*)pArg; + char aLck[16]; + unixInodeInfo *pInode; + static const char *azLock[] = { "SHARED", "RESERVED", + "PENDING", "EXCLUSIVE" }; + sqlite3_str_appendf(pStr, "{\"h\":%d", pFile->h); + sqlite3_str_appendf(pStr, ",\"vfs\":\"%s\"", pFile->pVfs->zName); + if( pFile->eFileLock ){ + sqlite3_str_appendf(pStr, ",\"eFileLock\":\"%s\"", + azLock[pFile->eFileLock-1]); + if( unixPosixAdvisoryLocks(pFile->h, aLck)==SQLITE_OK ){ + sqlite3_str_appendf(pStr, ",\"pal\":\"%s\"", aLck); + } + } + unixEnterMutex(); + if( pFile->pShm ){ + sqlite3_str_appendall(pStr, ",\"shm\":"); + unixDescribeShm(pStr, pFile->pShm); + } +#if SQLITE_MAX_MMAP_SIZE>0 + if( pFile->mmapSize ){ + sqlite3_str_appendf(pStr, ",\"mmapSize\":%lld", pFile->mmapSize); + sqlite3_str_appendf(pStr, ",\"nFetchOut\":%d", pFile->nFetchOut); + } +#endif + if( (pInode = pFile->pInode)!=0 ){ + sqlite3_str_appendf(pStr, ",\"inode\":{\"nRef\":%d",pInode->nRef); + sqlite3_mutex_enter(pInode->pLockMutex); + sqlite3_str_appendf(pStr, ",\"nShared\":%d", pInode->nShared); + if( pInode->eFileLock ){ + sqlite3_str_appendf(pStr, ",\"eFileLock\":\"%s\"", + azLock[pInode->eFileLock-1]); + } + if( pInode->pUnused ){ + char cSep = '['; + UnixUnusedFd *pUFd = pFile->pInode->pUnused; + sqlite3_str_appendall(pStr, ",\"unusedFd\":"); + while( pUFd ){ + sqlite3_str_appendf(pStr, "%c{\"fd\":%d,\"flags\":%d", + cSep, pUFd->fd, pUFd->flags); + cSep = ','; + if( unixPosixAdvisoryLocks(pUFd->fd, aLck)==SQLITE_OK ){ + sqlite3_str_appendf(pStr, ",\"pal\":\"%s\"", aLck); + } + sqlite3_str_append(pStr, "}", 1); + pUFd = pUFd->pNext; + } + sqlite3_str_append(pStr, "]", 1); + } + sqlite3_mutex_leave(pInode->pLockMutex); + sqlite3_str_append(pStr, "}", 1); + } + unixLeaveMutex(); + sqlite3_str_append(pStr, 
"}", 1); + return SQLITE_OK; + } +#endif /* SQLITE_DEBUG || SQLITE_ENABLE_FILESTAT */ } return SQLITE_NOTFOUND; } @@ -43225,6 +43798,26 @@ struct unixShm { #define UNIX_SHM_BASE ((22+SQLITE_SHM_NLOCK)*4) /* first lock byte */ #define UNIX_SHM_DMS (UNIX_SHM_BASE+SQLITE_SHM_NLOCK) /* deadman switch */ +#if defined(SQLITE_DEBUG) || defined(SQLITE_ENABLE_FILESTAT) +/* +** Describe the pShm object using JSON. Used for diagnostics only. +*/ +static void unixDescribeShm(sqlite3_str *pStr, unixShm *pShm){ + unixShmNode *pNode = pShm->pShmNode; + char aLck[16]; + sqlite3_str_appendf(pStr, "{\"h\":%d", pNode->hShm); + assert( unixMutexHeld() ); + sqlite3_str_appendf(pStr, ",\"nRef\":%d", pNode->nRef); + sqlite3_str_appendf(pStr, ",\"id\":%d", pShm->id); + sqlite3_str_appendf(pStr, ",\"sharedMask\":%d", pShm->sharedMask); + sqlite3_str_appendf(pStr, ",\"exclMask\":%d", pShm->exclMask); + if( unixPosixAdvisoryLocks(pNode->hShm, aLck)==SQLITE_OK ){ + sqlite3_str_appendf(pStr, ",\"pal\":\"%s\"", aLck); + } + sqlite3_str_append(pStr, "}", 1); +} +#endif /* SQLITE_DEBUG || SQLITE_ENABLE_FILESTAT */ + /* ** Use F_GETLK to check whether or not there are any readers with open ** wal-mode transactions in other processes on database file pFile. If @@ -43258,6 +43851,49 @@ static int unixFcntlExternalReader(unixFile *pFile, int *piOut){ return rc; } +/* +** If pFile has a -shm file open and it is sharing that file with some +** other connection, either in the same process or in a separate process, +** then return true. Return false if either pFile does not have a -shm +** file open or if it is the only connection to that -shm file across the +** entire system. +** +** This routine is not required for correct operation. It can always return +** false and SQLite will continue to operate according to spec. 
However, +** when this routine does its job, it adds extra robustness in cases +** where database file locks have been erroneously deleted in a WAL-mode +** database by doing close(open(DATABASE_PATHNAME)) or similar. +** +** With false negatives, SQLite still operates to spec, though with less +** robustness. With false positives, the last database connection on a +** WAL-mode database will fail to unlink the -wal and -shm files, which +** is annoying but harmless. False positives will also prevent a database +** connection from running "PRAGMA journal_mode=DELETE" in order to take +** the database out of WAL mode, which is perhaps more serious, but is +** still not a disaster. +*/ +static int unixIsSharingShmNode(unixFile *pFile){ + int rc; + unixShmNode *pShmNode; + if( pFile->pShm==0 ) return 0; + if( pFile->ctrlFlags & UNIXFILE_EXCL ) return 0; + pShmNode = pFile->pShm->pShmNode; + rc = 1; + unixEnterMutex(); + if( ALWAYS(pShmNode->nRef==1) ){ + struct flock lock; + lock.l_whence = SEEK_SET; + lock.l_start = UNIX_SHM_DMS; + lock.l_len = 1; + lock.l_type = F_WRLCK; + osFcntl(pShmNode->hShm, F_GETLK, &lock); + if( lock.l_type==F_UNLCK ){ + rc = 0; + } + } + unixLeaveMutex(); + return rc; +} /* ** Apply posix advisory locks for all bytes from ofst through ofst+n-1. @@ -43303,7 +43939,8 @@ static int unixShmSystemLock( /* Locks are within range */ assert( n>=1 && n<=SQLITE_SHM_NLOCK ); - assert( ofst>=UNIX_SHM_BASE && ofst<=(UNIX_SHM_DMS+SQLITE_SHM_NLOCK) ); + assert( ofst>=UNIX_SHM_BASE && ofst<=UNIX_SHM_DMS ); + assert( ofst+n-1<=UNIX_SHM_DMS ); if( pShmNode->hShm>=0 ){ int res; @@ -43835,7 +44472,7 @@ static int assertLockingArrayOk(unixShmNode *pShmNode){ return (memcmp(pShmNode->aLock, aLock, sizeof(aLock))==0); #endif } -#endif +#endif /* !defined(SQLITE_WASI) && !defined(SQLITE_OMIT_WAL) */ /* ** Change the lock state for a shared-memory segment. 
@@ -44797,10 +45434,17 @@ static int fillInUnixFile( storeLastErrno(pNew, 0); #if OS_VXWORKS if( rc!=SQLITE_OK ){ - if( h>=0 ) robust_close(pNew, h, __LINE__); - h = -1; - osUnlink(zFilename); - pNew->ctrlFlags |= UNIXFILE_DELETE; + if( h>=0 ){ + robust_close(pNew, h, __LINE__); + h = -1; + } + if( pNew->ctrlFlags & UNIXFILE_DELETE ){ + osUnlink(zFilename); + } + if( pNew->pId ){ + vxworksReleaseFileId(pNew->pId); + pNew->pId = 0; + } } #endif if( rc!=SQLITE_OK ){ @@ -44844,6 +45488,9 @@ static const char *unixTempFileDir(void){ while(1){ if( zDir!=0 +#if OS_VXWORKS + && zDir[0]=='/' +#endif && osStat(zDir, &buf)==0 && S_ISDIR(buf.st_mode) && osAccess(zDir, 03)==0 @@ -45158,6 +45805,12 @@ static int unixOpen( || eType==SQLITE_OPEN_TRANSIENT_DB || eType==SQLITE_OPEN_WAL ); +#if OS_VXWORKS + /* The file-ID mechanism used in Vxworks requires that all pathnames + ** provided to unixOpen must be absolute pathnames. */ + if( zPath!=0 && zPath[0]!='/' ){ return SQLITE_CANTOPEN; } +#endif + /* Detect a pid change and reset the PRNG. There is a race condition ** here such that two or more threads all trying to open databases at ** the same instant might all reset the PRNG. 
But multiple resets @@ -45358,8 +46011,11 @@ static int unixOpen( } #endif - assert( zPath==0 || zPath[0]=='/' - || eType==SQLITE_OPEN_SUPER_JOURNAL || eType==SQLITE_OPEN_MAIN_JOURNAL + assert( zPath==0 + || zPath[0]=='/' + || eType==SQLITE_OPEN_SUPER_JOURNAL + || eType==SQLITE_OPEN_MAIN_JOURNAL + || eType==SQLITE_OPEN_TEMP_JOURNAL ); rc = fillInUnixFile(pVfs, fd, pFile, zPath, ctrlFlags); @@ -47088,6 +47744,9 @@ SQLITE_API int sqlite3_os_init(void){ sqlite3KvvfsInit(); #endif unixBigLock = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_VFS1); +#if OS_VXWORKS + vxworksMutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_VFS2); +#endif #ifndef SQLITE_OMIT_WAL /* Validate lock assumptions */ @@ -47122,6 +47781,9 @@ SQLITE_API int sqlite3_os_init(void){ */ SQLITE_API int sqlite3_os_end(void){ unixBigLock = 0; +#if OS_VXWORKS + vxworksMutex = 0; +#endif return SQLITE_OK; } @@ -49794,6 +50456,7 @@ static BOOL winLockFile( #endif } +#ifndef SQLITE_OMIT_WAL /* ** Lock a region of nByte bytes starting at offset offset of file hFile. ** Take an EXCLUSIVE lock if parameter bExclusive is true, or a SHARED lock @@ -49876,6 +50539,7 @@ static int winHandleLockTimeout( } return rc; } +#endif /* #ifndef SQLITE_OMIT_WAL */ /* ** Unlock a file region. @@ -49910,6 +50574,7 @@ static BOOL winUnlockFile( #endif } +#ifndef SQLITE_OMIT_WAL /* ** Remove an nByte lock starting at offset iOff from HANDLE h. */ @@ -49917,6 +50582,7 @@ static int winHandleUnlock(HANDLE h, int iOff, int nByte){ BOOL ret = winUnlockFile(&h, iOff, 0, nByte, 0); return (ret ? SQLITE_OK : SQLITE_IOERR_UNLOCK); } +#endif /***************************************************************************** ** The next group of routines implement the I/O methods specified @@ -50254,6 +50920,7 @@ static int winWrite( return SQLITE_OK; } +#ifndef SQLITE_OMIT_WAL /* ** Truncate the file opened by handle h to nByte bytes in size. 
*/ @@ -50307,6 +50974,7 @@ static void winHandleClose(HANDLE h){ osCloseHandle(h); } } +#endif /* #ifndef SQLITE_OMIT_WAL */ /* ** Truncate an open file to a specified size @@ -51084,6 +51752,28 @@ static int winFileControl(sqlite3_file *id, int op, void *pArg){ } #endif /* SQLITE_ENABLE_SETLK_TIMEOUT */ +#if defined(SQLITE_DEBUG) || defined(SQLITE_ENABLE_FILESTAT) + case SQLITE_FCNTL_FILESTAT: { + sqlite3_str *pStr = (sqlite3_str*)pArg; + sqlite3_str_appendf(pStr, "{\"h\":%llu", (sqlite3_uint64)pFile->h); + sqlite3_str_appendf(pStr, ",\"vfs\":\"%s\"", pFile->pVfs->zName); + if( pFile->locktype ){ + static const char *azLock[] = { "SHARED", "RESERVED", + "PENDING", "EXCLUSIVE" }; + sqlite3_str_appendf(pStr, ",\"locktype\":\"%s\"", + azLock[pFile->locktype-1]); + } +#if SQLITE_MAX_MMAP_SIZE>0 + if( pFile->mmapSize ){ + sqlite3_str_appendf(pStr, ",\"mmapSize\":%lld", pFile->mmapSize); + sqlite3_str_appendf(pStr, ",\"nFetchOut\":%d", pFile->nFetchOut); + } +#endif + sqlite3_str_append(pStr, "}", 1); + return SQLITE_OK; + } +#endif /* SQLITE_DEBUG || SQLITE_ENABLE_FILESTAT */ + } OSTRACE(("FCNTL file=%p, rc=SQLITE_NOTFOUND\n", pFile->h)); return SQLITE_NOTFOUND; @@ -51121,200 +51811,6 @@ static int winDeviceCharacteristics(sqlite3_file *id){ */ static SYSTEM_INFO winSysInfo; -#ifndef SQLITE_OMIT_WAL - -/* -** Helper functions to obtain and relinquish the global mutex. The -** global mutex is used to protect the winLockInfo objects used by -** this file, all of which may be shared by multiple threads. -** -** Function winShmMutexHeld() is used to assert() that the global mutex -** is held when required. This function is only used as part of assert() -** statements. e.g. 
-** -** winShmEnterMutex() -** assert( winShmMutexHeld() ); -** winShmLeaveMutex() -*/ -static sqlite3_mutex *winBigLock = 0; -static void winShmEnterMutex(void){ - sqlite3_mutex_enter(winBigLock); -} -static void winShmLeaveMutex(void){ - sqlite3_mutex_leave(winBigLock); -} -#ifndef NDEBUG -static int winShmMutexHeld(void) { - return sqlite3_mutex_held(winBigLock); -} -#endif - -/* -** Object used to represent a single file opened and mmapped to provide -** shared memory. When multiple threads all reference the same -** log-summary, each thread has its own winFile object, but they all -** point to a single instance of this object. In other words, each -** log-summary is opened only once per process. -** -** winShmMutexHeld() must be true when creating or destroying -** this object or while reading or writing the following fields: -** -** nRef -** pNext -** -** The following fields are read-only after the object is created: -** -** zFilename -** -** Either winShmNode.mutex must be held or winShmNode.nRef==0 and -** winShmMutexHeld() is true when reading or writing any other field -** in this structure. -** -** File-handle hSharedShm is used to (a) take the DMS lock, (b) truncate -** the *-shm file if the DMS-locking protocol demands it, and (c) map -** regions of the *-shm file into memory using MapViewOfFile() or -** similar. Other locks are taken by individual clients using the -** winShm.hShm handles. 
-*/ -struct winShmNode { - sqlite3_mutex *mutex; /* Mutex to access this object */ - char *zFilename; /* Name of the file */ - HANDLE hSharedShm; /* File handle open on zFilename */ - - int isUnlocked; /* DMS lock has not yet been obtained */ - int isReadonly; /* True if read-only */ - int szRegion; /* Size of shared-memory regions */ - int nRegion; /* Size of array apRegion */ - - struct ShmRegion { - HANDLE hMap; /* File handle from CreateFileMapping */ - void *pMap; - } *aRegion; - DWORD lastErrno; /* The Windows errno from the last I/O error */ - - int nRef; /* Number of winShm objects pointing to this */ - winShmNode *pNext; /* Next in list of all winShmNode objects */ -#if defined(SQLITE_DEBUG) || defined(SQLITE_HAVE_OS_TRACE) - u8 nextShmId; /* Next available winShm.id value */ -#endif -}; - -/* -** A global array of all winShmNode objects. -** -** The winShmMutexHeld() must be true while reading or writing this list. -*/ -static winShmNode *winShmNodeList = 0; - -/* -** Structure used internally by this VFS to record the state of an -** open shared memory connection. There is one such structure for each -** winFile open on a wal mode database. -*/ -struct winShm { - winShmNode *pShmNode; /* The underlying winShmNode object */ - u16 sharedMask; /* Mask of shared locks held */ - u16 exclMask; /* Mask of exclusive locks held */ - HANDLE hShm; /* File-handle on *-shm file. For locking. 
*/ - int bReadonly; /* True if hShm is opened read-only */ -#if defined(SQLITE_DEBUG) || defined(SQLITE_HAVE_OS_TRACE) - u8 id; /* Id of this connection with its winShmNode */ -#endif -}; - -/* -** Constants used for locking -*/ -#define WIN_SHM_BASE ((22+SQLITE_SHM_NLOCK)*4) /* first lock byte */ -#define WIN_SHM_DMS (WIN_SHM_BASE+SQLITE_SHM_NLOCK) /* deadman switch */ - -/* Forward references to VFS methods */ -static int winOpen(sqlite3_vfs*,const char*,sqlite3_file*,int,int*); -static int winDelete(sqlite3_vfs *,const char*,int); - -/* -** Purge the winShmNodeList list of all entries with winShmNode.nRef==0. -** -** This is not a VFS shared-memory method; it is a utility function called -** by VFS shared-memory methods. -*/ -static void winShmPurge(sqlite3_vfs *pVfs, int deleteFlag){ - winShmNode **pp; - winShmNode *p; - assert( winShmMutexHeld() ); - OSTRACE(("SHM-PURGE pid=%lu, deleteFlag=%d\n", - osGetCurrentProcessId(), deleteFlag)); - pp = &winShmNodeList; - while( (p = *pp)!=0 ){ - if( p->nRef==0 ){ - int i; - if( p->mutex ){ sqlite3_mutex_free(p->mutex); } - for(i=0; inRegion; i++){ - BOOL bRc = osUnmapViewOfFile(p->aRegion[i].pMap); - OSTRACE(("SHM-PURGE-UNMAP pid=%lu, region=%d, rc=%s\n", - osGetCurrentProcessId(), i, bRc ? "ok" : "failed")); - UNUSED_VARIABLE_VALUE(bRc); - bRc = osCloseHandle(p->aRegion[i].hMap); - OSTRACE(("SHM-PURGE-CLOSE pid=%lu, region=%d, rc=%s\n", - osGetCurrentProcessId(), i, bRc ? "ok" : "failed")); - UNUSED_VARIABLE_VALUE(bRc); - } - winHandleClose(p->hSharedShm); - if( deleteFlag ){ - SimulateIOErrorBenign(1); - sqlite3BeginBenignMalloc(); - winDelete(pVfs, p->zFilename, 0); - sqlite3EndBenignMalloc(); - SimulateIOErrorBenign(0); - } - *pp = p->pNext; - sqlite3_free(p->aRegion); - sqlite3_free(p); - }else{ - pp = &p->pNext; - } - } -} - -/* -** The DMS lock has not yet been taken on the shm file associated with -** pShmNode. Take the lock. Truncate the *-shm file if required. 
-** Return SQLITE_OK if successful, or an SQLite error code otherwise. -*/ -static int winLockSharedMemory(winShmNode *pShmNode, DWORD nMs){ - HANDLE h = pShmNode->hSharedShm; - int rc = SQLITE_OK; - - assert( sqlite3_mutex_held(pShmNode->mutex) ); - rc = winHandleLockTimeout(h, WIN_SHM_DMS, 1, 1, 0); - if( rc==SQLITE_OK ){ - /* We have an EXCLUSIVE lock on the DMS byte. This means that this - ** is the first process to open the file. Truncate it to zero bytes - ** in this case. */ - if( pShmNode->isReadonly ){ - rc = SQLITE_READONLY_CANTINIT; - }else{ - rc = winHandleTruncate(h, 0); - } - - /* Release the EXCLUSIVE lock acquired above. */ - winUnlockFile(&h, WIN_SHM_DMS, 0, 1, 0); - }else if( (rc & 0xFF)==SQLITE_BUSY ){ - rc = SQLITE_OK; - } - - if( rc==SQLITE_OK ){ - /* Take a SHARED lock on the DMS byte. */ - rc = winHandleLockTimeout(h, WIN_SHM_DMS, 1, 0, nMs); - if( rc==SQLITE_OK ){ - pShmNode->isUnlocked = 0; - } - } - - return rc; -} - - /* ** Convert a UTF-8 filename into whatever form the underlying ** operating system wants filenames in. Space to hold the result @@ -51412,6 +51908,208 @@ static void *winConvertFromUtf8Filename(const char *zFilename){ return zConverted; } +#ifndef SQLITE_OMIT_WAL + +/* +** Helper functions to obtain and relinquish the global mutex. The +** global mutex is used to protect the winLockInfo objects used by +** this file, all of which may be shared by multiple threads. +** +** Function winShmMutexHeld() is used to assert() that the global mutex +** is held when required. This function is only used as part of assert() +** statements. e.g. 
+** +** winShmEnterMutex() +** assert( winShmMutexHeld() ); +** winShmLeaveMutex() +*/ +static sqlite3_mutex *winBigLock = 0; +static void winShmEnterMutex(void){ + sqlite3_mutex_enter(winBigLock); +} +static void winShmLeaveMutex(void){ + sqlite3_mutex_leave(winBigLock); +} +#ifndef NDEBUG +static int winShmMutexHeld(void) { + return sqlite3_mutex_held(winBigLock); +} +#endif + +/* +** Object used to represent a single file opened and mmapped to provide +** shared memory. When multiple threads all reference the same +** log-summary, each thread has its own winFile object, but they all +** point to a single instance of this object. In other words, each +** log-summary is opened only once per process. +** +** winShmMutexHeld() must be true when creating or destroying +** this object, or while editing the global linked list that starts +** at winShmNodeList. +** +** When reading or writing the linked list starting at winShmNode.pWinShmList, +** pShmNode->mutex must be held. +** +** The following fields are constant after the object is created: +** +** zFilename +** hSharedShm +** mutex +** bUseSharedLockHandle +** +** Either winShmNode.mutex must be held or winShmNode.pWinShmList==0 and +** winShmMutexHeld() is true when reading or writing any other field +** in this structure. +** +** File-handle hSharedShm is always used to (a) take the DMS lock, (b) +** truncate the *-shm file if the DMS-locking protocol demands it, and +** (c) map regions of the *-shm file into memory using MapViewOfFile() +** or similar. If bUseSharedLockHandle is true, then other locks are also +** taken on hSharedShm. Or, if bUseSharedLockHandle is false, then other +** locks are taken using each connection's winShm.hShm handles. 
+*/ +struct winShmNode { + sqlite3_mutex *mutex; /* Mutex to access this object */ + char *zFilename; /* Name of the file */ + HANDLE hSharedShm; /* File handle open on zFilename */ + int bUseSharedLockHandle; /* True to use hSharedShm for everything */ + + int isUnlocked; /* DMS lock has not yet been obtained */ + int isReadonly; /* True if read-only */ + int szRegion; /* Size of shared-memory regions */ + int nRegion; /* Size of array apRegion */ + + struct ShmRegion { + HANDLE hMap; /* File handle from CreateFileMapping */ + void *pMap; + } *aRegion; + DWORD lastErrno; /* The Windows errno from the last I/O error */ + + winShm *pWinShmList; /* List of winShm objects with ptrs to this */ + + winShmNode *pNext; /* Next in list of all winShmNode objects */ +#if defined(SQLITE_DEBUG) || defined(SQLITE_HAVE_OS_TRACE) + u8 nextShmId; /* Next available winShm.id value */ +#endif +}; + +/* +** A global array of all winShmNode objects. +** +** The winShmMutexHeld() must be true while reading or writing this list. +*/ +static winShmNode *winShmNodeList = 0; + +/* +** Structure used internally by this VFS to record the state of an +** open shared memory connection. There is one such structure for each +** winFile open on a wal mode database. +*/ +struct winShm { + winShmNode *pShmNode; /* The underlying winShmNode object */ + u16 sharedMask; /* Mask of shared locks held */ + u16 exclMask; /* Mask of exclusive locks held */ + HANDLE hShm; /* File-handle on *-shm file. For locking. 
*/ + int bReadonly; /* True if hShm is opened read-only */ +#if defined(SQLITE_DEBUG) || defined(SQLITE_HAVE_OS_TRACE) + u8 id; /* Id of this connection with its winShmNode */ +#endif + winShm *pWinShmNext; /* Next winShm object on same winShmNode */ +}; + +/* +** Constants used for locking +*/ +#define WIN_SHM_BASE ((22+SQLITE_SHM_NLOCK)*4) /* first lock byte */ +#define WIN_SHM_DMS (WIN_SHM_BASE+SQLITE_SHM_NLOCK) /* deadman switch */ + +/* Forward references to VFS methods */ +static int winOpen(sqlite3_vfs*,const char*,sqlite3_file*,int,int*); +static int winDelete(sqlite3_vfs *,const char*,int); + +/* +** Purge the winShmNodeList list of all entries with winShmNode.pWinShmList==0. +** +** This is not a VFS shared-memory method; it is a utility function called +** by VFS shared-memory methods. +*/ +static void winShmPurge(sqlite3_vfs *pVfs, int deleteFlag){ + winShmNode **pp; + winShmNode *p; + assert( winShmMutexHeld() ); + OSTRACE(("SHM-PURGE pid=%lu, deleteFlag=%d\n", + osGetCurrentProcessId(), deleteFlag)); + pp = &winShmNodeList; + while( (p = *pp)!=0 ){ + if( p->pWinShmList==0 ){ + int i; + if( p->mutex ){ sqlite3_mutex_free(p->mutex); } + for(i=0; inRegion; i++){ + BOOL bRc = osUnmapViewOfFile(p->aRegion[i].pMap); + OSTRACE(("SHM-PURGE-UNMAP pid=%lu, region=%d, rc=%s\n", + osGetCurrentProcessId(), i, bRc ? "ok" : "failed")); + UNUSED_VARIABLE_VALUE(bRc); + bRc = osCloseHandle(p->aRegion[i].hMap); + OSTRACE(("SHM-PURGE-CLOSE pid=%lu, region=%d, rc=%s\n", + osGetCurrentProcessId(), i, bRc ? "ok" : "failed")); + UNUSED_VARIABLE_VALUE(bRc); + } + winHandleClose(p->hSharedShm); + if( deleteFlag ){ + SimulateIOErrorBenign(1); + sqlite3BeginBenignMalloc(); + winDelete(pVfs, p->zFilename, 0); + sqlite3EndBenignMalloc(); + SimulateIOErrorBenign(0); + } + *pp = p->pNext; + sqlite3_free(p->aRegion); + sqlite3_free(p); + }else{ + pp = &p->pNext; + } + } +} + +/* +** The DMS lock has not yet been taken on the shm file associated with +** pShmNode. Take the lock. 
Truncate the *-shm file if required. +** Return SQLITE_OK if successful, or an SQLite error code otherwise. +*/ +static int winLockSharedMemory(winShmNode *pShmNode, DWORD nMs){ + HANDLE h = pShmNode->hSharedShm; + int rc = SQLITE_OK; + + assert( sqlite3_mutex_held(pShmNode->mutex) ); + rc = winHandleLockTimeout(h, WIN_SHM_DMS, 1, 1, 0); + if( rc==SQLITE_OK ){ + /* We have an EXCLUSIVE lock on the DMS byte. This means that this + ** is the first process to open the file. Truncate it to zero bytes + ** in this case. */ + if( pShmNode->isReadonly ){ + rc = SQLITE_READONLY_CANTINIT; + }else{ + rc = winHandleTruncate(h, 0); + } + + /* Release the EXCLUSIVE lock acquired above. */ + winUnlockFile(&h, WIN_SHM_DMS, 0, 1, 0); + }else if( (rc & 0xFF)==SQLITE_BUSY ){ + rc = SQLITE_OK; + } + + if( rc==SQLITE_OK ){ + /* Take a SHARED lock on the DMS byte. */ + rc = winHandleLockTimeout(h, WIN_SHM_DMS, 1, 0, nMs); + if( rc==SQLITE_OK ){ + pShmNode->isUnlocked = 0; + } + } + + return rc; +} + + /* ** This function is used to open a handle on a *-shm file. ** @@ -51507,6 +52205,60 @@ static int winHandleOpen( return rc; } +/* +** Close pDbFd's connection to shared-memory. Delete the underlying +** *-shm file if deleteFlag is true. 
+*/ +static int winCloseSharedMemory(winFile *pDbFd, int deleteFlag){ + winShm *p; /* The connection to be closed */ + winShm **pp; /* Iterator for pShmNode->pWinShmList */ + winShmNode *pShmNode; /* The underlying shared-memory file */ + + p = pDbFd->pShm; + if( p==0 ) return SQLITE_OK; + if( p->hShm!=INVALID_HANDLE_VALUE ){ + osCloseHandle(p->hShm); + } + + winShmEnterMutex(); + pShmNode = p->pShmNode; + + /* Remove this connection from the winShmNode.pWinShmList list */ + sqlite3_mutex_enter(pShmNode->mutex); + for(pp=&pShmNode->pWinShmList; *pp!=p; pp=&(*pp)->pWinShmNext){} + *pp = p->pWinShmNext; + sqlite3_mutex_leave(pShmNode->mutex); + + winShmPurge(pDbFd->pVfs, deleteFlag); + winShmLeaveMutex(); + + /* Free the connection p */ + sqlite3_free(p); + pDbFd->pShm = 0; + return SQLITE_OK; +} + +/* +** testfixture builds may set this global variable to true via a +** Tcl interface. This forces the VFS to use the locking normally +** only used for UNC paths for all files. +*/ +#ifdef SQLITE_TEST +SQLITE_API int sqlite3_win_test_unc_locking = 0; +#else +# define sqlite3_win_test_unc_locking 0 +#endif + +/* +** Return true if the string passed as the only argument is likely +** to be a UNC path. In other words, if it starts with "\\". +*/ +static int winIsUNCPath(const char *zFile){ + if( zFile[0]=='\\' && zFile[1]=='\\' ){ + return 1; + } + return sqlite3_win_test_unc_locking; +} /* ** Open the shared-memory area associated with database file pDbFd. @@ -51533,15 +52285,10 @@ static int winOpenSharedMemory(winFile *pDbFd){ pNew->zFilename = (char*)&pNew[1]; pNew->hSharedShm = INVALID_HANDLE_VALUE; pNew->isUnlocked = 1; + pNew->bUseSharedLockHandle = winIsUNCPath(pDbFd->zPath); sqlite3_snprintf(nName+15, pNew->zFilename, "%s-shm", pDbFd->zPath); sqlite3FileSuffix3(pDbFd->zPath, pNew->zFilename); - /* Open a file-handle on the *-shm file for this connection. This file-handle - ** is only used for locking. 
The mapping of the *-shm file is created using - ** the shared file handle in winShmNode.hSharedShm. */ - p->bReadonly = sqlite3_uri_boolean(pDbFd->zPath, "readonly_shm", 0); - rc = winHandleOpen(pNew->zFilename, &p->bReadonly, &p->hShm); - /* Look to see if there is an existing winShmNode that can be used. ** If no matching winShmNode currently exists, then create a new one. */ winShmEnterMutex(); @@ -51562,7 +52309,7 @@ static int winOpenSharedMemory(winFile *pDbFd){ /* Open a file-handle to use for mappings, and for the DMS lock. */ if( rc==SQLITE_OK ){ HANDLE h = INVALID_HANDLE_VALUE; - pShmNode->isReadonly = p->bReadonly; + pShmNode->isReadonly = sqlite3_uri_boolean(pDbFd->zPath,"readonly_shm",0); rc = winHandleOpen(pNew->zFilename, &pShmNode->isReadonly, &h); pShmNode->hSharedShm = h; } @@ -51584,20 +52331,35 @@ static int winOpenSharedMemory(winFile *pDbFd){ /* If no error has occurred, link the winShm object to the winShmNode and ** the winShm to pDbFd. */ if( rc==SQLITE_OK ){ + sqlite3_mutex_enter(pShmNode->mutex); p->pShmNode = pShmNode; - pShmNode->nRef++; + p->pWinShmNext = pShmNode->pWinShmList; + pShmNode->pWinShmList = p; #if defined(SQLITE_DEBUG) || defined(SQLITE_HAVE_OS_TRACE) p->id = pShmNode->nextShmId++; #endif pDbFd->pShm = p; + sqlite3_mutex_leave(pShmNode->mutex); }else if( p ){ - winHandleClose(p->hShm); sqlite3_free(p); } assert( rc!=SQLITE_OK || pShmNode->isUnlocked==0 || pShmNode->nRegion==0 ); winShmLeaveMutex(); sqlite3_free(pNew); + + /* Open a file-handle on the *-shm file for this connection. This file-handle + ** is only used for locking. The mapping of the *-shm file is created using + ** the shared file handle in winShmNode.hSharedShm. 
*/ + if( rc==SQLITE_OK && pShmNode->bUseSharedLockHandle==0 ){ + p->bReadonly = sqlite3_uri_boolean(pDbFd->zPath, "readonly_shm", 0); + rc = winHandleOpen(pShmNode->zFilename, &p->bReadonly, &p->hShm); + if( rc!=SQLITE_OK ){ + assert( p->hShm==INVALID_HANDLE_VALUE ); + winCloseSharedMemory(pDbFd, 0); + } + } + return rc; } @@ -51609,33 +52371,7 @@ static int winShmUnmap( sqlite3_file *fd, /* Database holding shared memory */ int deleteFlag /* Delete after closing if true */ ){ - winFile *pDbFd; /* Database holding shared-memory */ - winShm *p; /* The connection to be closed */ - winShmNode *pShmNode; /* The underlying shared-memory file */ - - pDbFd = (winFile*)fd; - p = pDbFd->pShm; - if( p==0 ) return SQLITE_OK; - if( p->hShm!=INVALID_HANDLE_VALUE ){ - osCloseHandle(p->hShm); - } - - pShmNode = p->pShmNode; - winShmEnterMutex(); - - /* If pShmNode->nRef has reached 0, then close the underlying - ** shared-memory file, too. */ - assert( pShmNode->nRef>0 ); - pShmNode->nRef--; - if( pShmNode->nRef==0 ){ - winShmPurge(pDbFd->pVfs, deleteFlag); - } - winShmLeaveMutex(); - - /* Free the connection p */ - sqlite3_free(p); - pDbFd->pShm = 0; - return SQLITE_OK; + return winCloseSharedMemory((winFile*)fd, deleteFlag); } /* @@ -51704,6 +52440,7 @@ static int winShmLock( || (flags==(SQLITE_SHM_SHARED|SQLITE_SHM_LOCK) && 0==(p->sharedMask & mask)) || (flags==(SQLITE_SHM_EXCLUSIVE|SQLITE_SHM_LOCK)) ){ + HANDLE h = p->hShm; if( flags & SQLITE_SHM_UNLOCK ){ /* Case (a) - unlock. 
*/ @@ -51712,7 +52449,27 @@ static int winShmLock( assert( !(flags & SQLITE_SHM_EXCLUSIVE) || (p->exclMask & mask)==mask ); assert( !(flags & SQLITE_SHM_SHARED) || (p->sharedMask & mask)==mask ); - rc = winHandleUnlock(p->hShm, ofst+WIN_SHM_BASE, n); + assert( !(flags & SQLITE_SHM_SHARED) || n==1 ); + if( pShmNode->bUseSharedLockHandle ){ + h = pShmNode->hSharedShm; + if( flags & SQLITE_SHM_SHARED ){ + winShm *pShm; + sqlite3_mutex_enter(pShmNode->mutex); + for(pShm=pShmNode->pWinShmList; pShm; pShm=pShm->pWinShmNext){ + if( pShm!=p && (pShm->sharedMask & mask) ){ + /* Another connection within this process is also holding this + ** SHARED lock. So do not actually release the OS lock. */ + h = INVALID_HANDLE_VALUE; + break; + } + } + sqlite3_mutex_leave(pShmNode->mutex); + } + } + + if( h!=INVALID_HANDLE_VALUE ){ + rc = winHandleUnlock(h, ofst+WIN_SHM_BASE, n); + } /* If successful, also clear the bits in sharedMask/exclMask */ if( rc==SQLITE_OK ){ @@ -51722,7 +52479,32 @@ static int winShmLock( }else{ int bExcl = ((flags & SQLITE_SHM_EXCLUSIVE) ? 
1 : 0); DWORD nMs = winFileBusyTimeout(pDbFd); - rc = winHandleLockTimeout(p->hShm, ofst+WIN_SHM_BASE, n, bExcl, nMs); + + if( pShmNode->bUseSharedLockHandle ){ + winShm *pShm; + h = pShmNode->hSharedShm; + sqlite3_mutex_enter(pShmNode->mutex); + for(pShm=pShmNode->pWinShmList; pShm; pShm=pShm->pWinShmNext){ + if( bExcl ){ + if( (pShm->sharedMask|pShm->exclMask) & mask ){ + rc = SQLITE_BUSY; + h = INVALID_HANDLE_VALUE; + } + }else{ + if( pShm->sharedMask & mask ){ + h = INVALID_HANDLE_VALUE; + }else if( pShm->exclMask & mask ){ + rc = SQLITE_BUSY; + h = INVALID_HANDLE_VALUE; + } + } + } + sqlite3_mutex_leave(pShmNode->mutex); + } + + if( h!=INVALID_HANDLE_VALUE ){ + rc = winHandleLockTimeout(h, ofst+WIN_SHM_BASE, n, bExcl, nMs); + } if( rc==SQLITE_OK ){ if( bExcl ){ p->exclMask = (p->exclMask | mask); @@ -54861,6 +55643,7 @@ struct Bitvec { } u; }; + /* ** Create a new bitmap object able to handle bits between 0 and iSize, ** inclusive. Return a pointer to the new object. Return NULL if @@ -55049,6 +55832,52 @@ SQLITE_PRIVATE u32 sqlite3BitvecSize(Bitvec *p){ return p->iSize; } +#ifdef SQLITE_DEBUG +/* +** Show the content of a Bitvec option and its children. Indent +** everything by n spaces. Add x to each bitvec value. +** +** From a debugger such as gdb, one can type: +** +** call sqlite3ShowBitvec(p) +** +** For some Bitvec p and see a recursive view of the Bitvec's content. 
+*/ +static void showBitvec(Bitvec *p, int n, unsigned x){ + int i; + if( p==0 ){ + printf("NULL\n"); + return; + } + printf("Bitvec 0x%p iSize=%u", p, p->iSize); + if( p->iSize<=BITVEC_NBIT ){ + printf(" bitmap\n"); + printf("%*s bits:", n, ""); + for(i=1; i<=BITVEC_NBIT; i++){ + if( sqlite3BitvecTest(p,i) ) printf(" %u", x+(unsigned)i); + } + printf("\n"); + }else if( p->iDivisor==0 ){ + printf(" hash with %u entries\n", p->nSet); + printf("%*s bits:", n, ""); + for(i=0; iu.aHash[i] ) printf(" %u", x+(unsigned)p->u.aHash[i]); + } + printf("\n"); + }else{ + printf(" sub-bitvec with iDivisor=%u\n", p->iDivisor); + for(i=0; iu.apSub[i]==0 ) continue; + printf("%*s apSub[%d]=", n, "", i); + showBitvec(p->u.apSub[i], n+4, i*p->iDivisor); + } + } +} +SQLITE_PRIVATE void sqlite3ShowBitvec(Bitvec *p){ + showBitvec(p, 0, 0); +} +#endif + #ifndef SQLITE_UNTESTABLE /* ** Let V[] be an array of unsigned characters sufficient to hold @@ -55060,6 +55889,7 @@ SQLITE_PRIVATE u32 sqlite3BitvecSize(Bitvec *p){ #define CLEARBIT(V,I) V[I>>3] &= ~(BITVEC_TELEM)(1<<(I&7)) #define TESTBIT(V,I) (V[I>>3]&(1<<(I&7)))!=0 + /* ** This routine runs an extensive test of the Bitvec code. ** @@ -55068,7 +55898,7 @@ SQLITE_PRIVATE u32 sqlite3BitvecSize(Bitvec *p){ ** by 0, 1, or 3 operands, depending on the opcode. Another ** opcode follows immediately after the last operand. ** -** There are 6 opcodes numbered from 0 through 5. 0 is the +** There are opcodes numbered starting with 0. 0 is the ** "halt" opcode and causes the test to end. 
** ** 0 Halt and return the number of errors @@ -55077,18 +55907,25 @@ SQLITE_PRIVATE u32 sqlite3BitvecSize(Bitvec *p){ ** 3 N Set N randomly chosen bits ** 4 N Clear N randomly chosen bits ** 5 N S X Set N bits from S increment X in array only, not in bitvec +** 6 Invoice sqlite3ShowBitvec() on the Bitvec object so far +** 7 X Show compile-time parameters and the hash of X ** ** The opcodes 1 through 4 perform set and clear operations are performed ** on both a Bitvec object and on a linear array of bits obtained from malloc. ** Opcode 5 works on the linear array only, not on the Bitvec. ** Opcode 5 is used to deliberately induce a fault in order to -** confirm that error detection works. +** confirm that error detection works. Opcodes 6 and greater are +** state output opcodes. Opcodes 6 and greater are no-ops unless +** SQLite has been compiled with SQLITE_DEBUG. ** ** At the conclusion of the test the linear array is compared ** against the Bitvec object. If there are any differences, ** an error is returned. If they are the same, zero is returned. ** ** If a memory allocation error occurs, return -1. +** +** sz is the size of the Bitvec. Or if sz is negative, make the size +** 2*(unsigned)(-sz) and disabled the linear vector check. 
*/ SQLITE_PRIVATE int sqlite3BitvecBuiltinTest(int sz, int *aOp){ Bitvec *pBitvec = 0; @@ -55099,10 +55936,15 @@ SQLITE_PRIVATE int sqlite3BitvecBuiltinTest(int sz, int *aOp){ /* Allocate the Bitvec to be tested and a linear array of ** bits to act as the reference */ - pBitvec = sqlite3BitvecCreate( sz ); - pV = sqlite3MallocZero( (7+(i64)sz)/8 + 1 ); + if( sz<=0 ){ + pBitvec = sqlite3BitvecCreate( 2*(unsigned)(-sz) ); + pV = 0; + }else{ + pBitvec = sqlite3BitvecCreate( sz ); + pV = sqlite3MallocZero( (7+(i64)sz)/8 + 1 ); + } pTmpSpace = sqlite3_malloc64(BITVEC_SZ); - if( pBitvec==0 || pV==0 || pTmpSpace==0 ) goto bitvec_end; + if( pBitvec==0 || pTmpSpace==0 || (pV==0 && sz>0) ) goto bitvec_end; /* NULL pBitvec tests */ sqlite3BitvecSet(0, 1); @@ -55111,6 +55953,24 @@ SQLITE_PRIVATE int sqlite3BitvecBuiltinTest(int sz, int *aOp){ /* Run the program */ pc = i = 0; while( (op = aOp[pc])!=0 ){ + if( op>=6 ){ +#ifdef SQLITE_DEBUG + if( op==6 ){ + sqlite3ShowBitvec(pBitvec); + }else if( op==7 ){ + printf("BITVEC_SZ = %d (%d by sizeof)\n", + BITVEC_SZ, (int)sizeof(Bitvec)); + printf("BITVEC_USIZE = %d\n", (int)BITVEC_USIZE); + printf("BITVEC_NELEM = %d\n", (int)BITVEC_NELEM); + printf("BITVEC_NBIT = %d\n", (int)BITVEC_NBIT); + printf("BITVEC_NINT = %d\n", (int)BITVEC_NINT); + printf("BITVEC_MXHASH = %d\n", (int)BITVEC_MXHASH); + printf("BITVEC_NPTR = %d\n", (int)BITVEC_NPTR); + } +#endif + pc++; + continue; + } switch( op ){ case 1: case 2: @@ -55132,12 +55992,12 @@ SQLITE_PRIVATE int sqlite3BitvecBuiltinTest(int sz, int *aOp){ pc += nx; i = (i & 0x7fffffff)%sz; if( (op & 1)!=0 ){ - SETBIT(pV, (i+1)); + if( pV ) SETBIT(pV, (i+1)); if( op!=5 ){ if( sqlite3BitvecSet(pBitvec, i+1) ) goto bitvec_end; } }else{ - CLEARBIT(pV, (i+1)); + if( pV ) CLEARBIT(pV, (i+1)); sqlite3BitvecClear(pBitvec, i+1, pTmpSpace); } } @@ -55147,14 +56007,18 @@ SQLITE_PRIVATE int sqlite3BitvecBuiltinTest(int sz, int *aOp){ ** match (rc==0). Change rc to non-zero if a discrepancy ** is found. 
*/ - rc = sqlite3BitvecTest(0,0) + sqlite3BitvecTest(pBitvec, sz+1) - + sqlite3BitvecTest(pBitvec, 0) - + (sqlite3BitvecSize(pBitvec) - sz); - for(i=1; i<=sz; i++){ - if( (TESTBIT(pV,i))!=sqlite3BitvecTest(pBitvec,i) ){ - rc = i; - break; + if( pV ){ + rc = sqlite3BitvecTest(0,0) + sqlite3BitvecTest(pBitvec, sz+1) + + sqlite3BitvecTest(pBitvec, 0) + + (sqlite3BitvecSize(pBitvec) - sz); + for(i=1; i<=sz; i++){ + if( (TESTBIT(pV,i))!=sqlite3BitvecTest(pBitvec,i) ){ + rc = i; + break; + } } + }else{ + rc = 0; } /* Free allocated structure */ @@ -59915,7 +60779,7 @@ static void pager_unlock(Pager *pPager){ ** have sqlite3WalEndReadTransaction() drop the write-lock, as it once ** did, because this would break "BEGIN EXCLUSIVE" handling for ** SQLITE_ENABLE_SETLK_TIMEOUT builds. */ - sqlite3WalEndWriteTransaction(pPager->pWal); + (void)sqlite3WalEndWriteTransaction(pPager->pWal); } sqlite3WalEndReadTransaction(pPager->pWal); pPager->eState = PAGER_OPEN; @@ -61671,14 +62535,27 @@ SQLITE_PRIVATE void sqlite3PagerSetFlags( unsigned pgFlags /* Various flags */ ){ unsigned level = pgFlags & PAGER_SYNCHRONOUS_MASK; - if( pPager->tempFile ){ + if( pPager->tempFile || level==PAGER_SYNCHRONOUS_OFF ){ pPager->noSync = 1; pPager->fullSync = 0; pPager->extraSync = 0; }else{ - pPager->noSync = level==PAGER_SYNCHRONOUS_OFF ?1:0; + pPager->noSync = 0; pPager->fullSync = level>=PAGER_SYNCHRONOUS_FULL ?1:0; - pPager->extraSync = level==PAGER_SYNCHRONOUS_EXTRA ?1:0; + + /* Set Pager.extraSync if "PRAGMA synchronous=EXTRA" is requested, or + ** if the file-system supports F2FS style atomic writes. If this flag + ** is set, SQLite syncs the directory to disk immediately after deleting + ** a journal file in "PRAGMA journal_mode=DELETE" mode. 
*/ + if( level==PAGER_SYNCHRONOUS_EXTRA +#ifdef SQLITE_ENABLE_BATCH_ATOMIC_WRITE + || (sqlite3OsDeviceCharacteristics(pPager->fd) & SQLITE_IOCAP_BATCH_ATOMIC) +#endif + ){ + pPager->extraSync = 1; + }else{ + pPager->extraSync = 0; + } } if( pPager->noSync ){ pPager->syncFlags = 0; @@ -65571,7 +66448,7 @@ SQLITE_PRIVATE int sqlite3PagerCheckpoint( } if( pPager->pWal ){ rc = sqlite3WalCheckpoint(pPager->pWal, db, eMode, - (eMode==SQLITE_CHECKPOINT_PASSIVE ? 0 : pPager->xBusyHandler), + (eMode<=SQLITE_CHECKPOINT_PASSIVE ? 0 : pPager->xBusyHandler), pPager->pBusyHandlerArg, pPager->walSyncFlags, pPager->pageSize, (u8 *)pPager->pTmpSpace, pnLog, pnCkpt @@ -66481,7 +67358,7 @@ struct WalIterator { /* Size (in bytes) of a WalIterator object suitable for N or fewer segments */ #define SZ_WALITERATOR(N) \ - (offsetof(WalIterator,aSegment)*(N)*sizeof(struct WalSegment)) + (offsetof(WalIterator,aSegment)+(N)*sizeof(struct WalSegment)) /* ** Define the parameters of the hash tables in the wal-index file. There @@ -69367,7 +70244,7 @@ SQLITE_PRIVATE void sqlite3WalEndReadTransaction(Wal *pWal){ assert( pWal->writeLock==0 || pWal->readLock<0 ); #endif if( pWal->readLock>=0 ){ - sqlite3WalEndWriteTransaction(pWal); + (void)sqlite3WalEndWriteTransaction(pWal); walUnlockShared(pWal, WAL_READ_LOCK(pWal->readLock)); pWal->readLock = -1; } @@ -70176,7 +71053,8 @@ SQLITE_PRIVATE int sqlite3WalCheckpoint( /* EVIDENCE-OF: R-62920-47450 The busy-handler callback is never invoked ** in the SQLITE_CHECKPOINT_PASSIVE mode. */ - assert( eMode!=SQLITE_CHECKPOINT_PASSIVE || xBusy==0 ); + assert( SQLITE_CHECKPOINT_NOOPSQLITE_CHECKPOINT_PASSIVE || xBusy==0 ); if( pWal->readOnly ) return SQLITE_READONLY; WALTRACE(("WAL%p: checkpoint begins\n", pWal)); @@ -70193,31 +71071,35 @@ SQLITE_PRIVATE int sqlite3WalCheckpoint( ** EVIDENCE-OF: R-53820-33897 Even if there is a busy-handler configured, ** it will not be invoked in this case. 
*/ - rc = walLockExclusive(pWal, WAL_CKPT_LOCK, 1); - testcase( rc==SQLITE_BUSY ); - testcase( rc!=SQLITE_OK && xBusy2!=0 ); - if( rc==SQLITE_OK ){ - pWal->ckptLock = 1; + if( eMode!=SQLITE_CHECKPOINT_NOOP ){ + rc = walLockExclusive(pWal, WAL_CKPT_LOCK, 1); + testcase( rc==SQLITE_BUSY ); + testcase( rc!=SQLITE_OK && xBusy2!=0 ); + if( rc==SQLITE_OK ){ + pWal->ckptLock = 1; - /* IMPLEMENTATION-OF: R-59782-36818 The SQLITE_CHECKPOINT_FULL, RESTART and - ** TRUNCATE modes also obtain the exclusive "writer" lock on the database - ** file. - ** - ** EVIDENCE-OF: R-60642-04082 If the writer lock cannot be obtained - ** immediately, and a busy-handler is configured, it is invoked and the - ** writer lock retried until either the busy-handler returns 0 or the - ** lock is successfully obtained. - */ - if( eMode!=SQLITE_CHECKPOINT_PASSIVE ){ - rc = walBusyLock(pWal, xBusy2, pBusyArg, WAL_WRITE_LOCK, 1); - if( rc==SQLITE_OK ){ - pWal->writeLock = 1; - }else if( rc==SQLITE_BUSY ){ - eMode2 = SQLITE_CHECKPOINT_PASSIVE; - xBusy2 = 0; - rc = SQLITE_OK; + /* IMPLEMENTATION-OF: R-59782-36818 The SQLITE_CHECKPOINT_FULL, RESTART + ** and TRUNCATE modes also obtain the exclusive "writer" lock on the + ** database file. + ** + ** EVIDENCE-OF: R-60642-04082 If the writer lock cannot be obtained + ** immediately, and a busy-handler is configured, it is invoked and the + ** writer lock retried until either the busy-handler returns 0 or the + ** lock is successfully obtained. + */ + if( eMode!=SQLITE_CHECKPOINT_PASSIVE ){ + rc = walBusyLock(pWal, xBusy2, pBusyArg, WAL_WRITE_LOCK, 1); + if( rc==SQLITE_OK ){ + pWal->writeLock = 1; + }else if( rc==SQLITE_BUSY ){ + eMode2 = SQLITE_CHECKPOINT_PASSIVE; + xBusy2 = 0; + rc = SQLITE_OK; + } } } + }else{ + rc = SQLITE_OK; } @@ -70231,7 +71113,7 @@ SQLITE_PRIVATE int sqlite3WalCheckpoint( ** immediately and do a partial checkpoint if it cannot obtain it. 
*/ walDisableBlocking(pWal); rc = walIndexReadHdr(pWal, &isChanged); - if( eMode2!=SQLITE_CHECKPOINT_PASSIVE ) (void)walEnableBlocking(pWal); + if( eMode2>SQLITE_CHECKPOINT_PASSIVE ) (void)walEnableBlocking(pWal); if( isChanged && pWal->pDbFd->pMethods->iVersion>=3 ){ sqlite3OsUnfetch(pWal->pDbFd, 0, 0); } @@ -70241,7 +71123,7 @@ SQLITE_PRIVATE int sqlite3WalCheckpoint( if( rc==SQLITE_OK ){ if( pWal->hdr.mxFrame && walPagesize(pWal)!=nBuf ){ rc = SQLITE_CORRUPT_BKPT; - }else{ + }else if( eMode2!=SQLITE_CHECKPOINT_NOOP ){ rc = walCheckpoint(pWal, db, eMode2, xBusy2, pBusyArg, sync_flags,zBuf); } @@ -70269,7 +71151,7 @@ SQLITE_PRIVATE int sqlite3WalCheckpoint( sqlite3WalDb(pWal, 0); /* Release the locks. */ - sqlite3WalEndWriteTransaction(pWal); + (void)sqlite3WalEndWriteTransaction(pWal); if( pWal->ckptLock ){ walUnlockExclusive(pWal, WAL_CKPT_LOCK, 1); pWal->ckptLock = 0; @@ -72426,7 +73308,7 @@ static int btreeMoveto( assert( nKey==(i64)(int)nKey ); pIdxKey = sqlite3VdbeAllocUnpackedRecord(pKeyInfo); if( pIdxKey==0 ) return SQLITE_NOMEM_BKPT; - sqlite3VdbeRecordUnpack(pKeyInfo, (int)nKey, pKey, pIdxKey); + sqlite3VdbeRecordUnpack((int)nKey, pKey, pIdxKey); if( pIdxKey->nField==0 || pIdxKey->nField>pKeyInfo->nAllField ){ rc = SQLITE_CORRUPT_BKPT; }else{ @@ -73483,10 +74365,10 @@ static int freeSpace(MemPage *pPage, int iStart, int iSize){ assert( pPage->pBt!=0 ); assert( sqlite3PagerIswriteable(pPage->pDbPage) ); assert( CORRUPT_DB || iStart>=pPage->hdrOffset+6+pPage->childPtrSize ); - assert( CORRUPT_DB || iEnd <= pPage->pBt->usableSize ); + assert( CORRUPT_DB || iEnd <= (int)pPage->pBt->usableSize ); assert( sqlite3_mutex_held(pPage->pBt->mutex) ); assert( iSize>=4 ); /* Minimum cell size is 4 */ - assert( CORRUPT_DB || iStart<=pPage->pBt->usableSize-4 ); + assert( CORRUPT_DB || iStart<=(int)pPage->pBt->usableSize-4 ); /* The list of freeblocks must be in ascending order. Find the ** spot on the list where iStart should be inserted. 
@@ -74410,6 +75292,7 @@ static int removeFromSharingList(BtShared *pBt){ sqlite3_mutex_leave(pMainMtx); return removed; #else + UNUSED_PARAMETER( pBt ); return 1; #endif } @@ -74627,6 +75510,10 @@ SQLITE_PRIVATE int sqlite3BtreeSetPageSize(Btree *p, int pageSize, int nReserve, sqlite3BtreeEnter(p); pBt->nReserveWanted = (u8)nReserve; x = pBt->pageSize - pBt->usableSize; + if( x==nReserve && (pageSize==0 || (u32)pageSize==pBt->pageSize) ){ + sqlite3BtreeLeave(p); + return SQLITE_OK; + } if( nReservebtsFlags & BTS_PAGESIZE_FIXED ){ sqlite3BtreeLeave(p); @@ -77216,6 +78103,30 @@ SQLITE_PRIVATE int sqlite3BtreeFirst(BtCursor *pCur, int *pRes){ return rc; } +/* Set *pRes to 1 (true) if the BTree pointed to by cursor pCur contains zero +** rows of content. Set *pRes to 0 (false) if the table contains content. +** Return SQLITE_OK on success or some error code (ex: SQLITE_NOMEM) if +** something goes wrong. +*/ +SQLITE_PRIVATE int sqlite3BtreeIsEmpty(BtCursor *pCur, int *pRes){ + int rc; + + assert( cursorOwnsBtShared(pCur) ); + assert( sqlite3_mutex_held(pCur->pBtree->db->mutex) ); + if( pCur->eState==CURSOR_VALID ){ + *pRes = 0; + return SQLITE_OK; + } + rc = moveToRoot(pCur); + if( rc==SQLITE_EMPTY ){ + *pRes = 1; + rc = SQLITE_OK; + }else{ + *pRes = 0; + } + return rc; +} + #ifdef SQLITE_DEBUG /* The cursors is CURSOR_VALID and has BTCF_AtLast set. Verify that ** this flags are true for a consistent database. @@ -77435,8 +78346,8 @@ moveto_table_finish: } /* -** Compare the "idx"-th cell on the page the cursor pCur is currently -** pointing to to pIdxKey using xRecordCompare. Return negative or +** Compare the "idx"-th cell on the page pPage against the key +** pointing to by pIdxKey using xRecordCompare. Return negative or ** zero if the cell is less than or equal pIdxKey. Return positive ** if unknown. ** @@ -77451,12 +78362,11 @@ moveto_table_finish: ** a positive value as that will cause the optimization to be skipped. 
*/ static int indexCellCompare( - BtCursor *pCur, + MemPage *pPage, int idx, UnpackedRecord *pIdxKey, RecordCompare xRecordCompare ){ - MemPage *pPage = pCur->pPage; int c; int nCell; /* Size of the pCell cell in bytes */ u8 *pCell = findCellPastPtr(pPage, idx); @@ -77565,14 +78475,14 @@ SQLITE_PRIVATE int sqlite3BtreeIndexMoveto( ){ int c; if( pCur->ix==pCur->pPage->nCell-1 - && (c = indexCellCompare(pCur, pCur->ix, pIdxKey, xRecordCompare))<=0 + && (c = indexCellCompare(pCur->pPage,pCur->ix,pIdxKey,xRecordCompare))<=0 && pIdxKey->errCode==SQLITE_OK ){ *pRes = c; return SQLITE_OK; /* Cursor already pointing at the correct spot */ } if( pCur->iPage>0 - && indexCellCompare(pCur, 0, pIdxKey, xRecordCompare)<=0 + && indexCellCompare(pCur->pPage, 0, pIdxKey, xRecordCompare)<=0 && pIdxKey->errCode==SQLITE_OK ){ pCur->curFlags &= ~(BTCF_ValidOvfl|BTCF_AtLast); @@ -77789,7 +78699,7 @@ SQLITE_PRIVATE i64 sqlite3BtreeRowCountEst(BtCursor *pCur){ n = pCur->pPage->nCell; for(i=0; iiPage; i++){ - n *= pCur->apPage[i]->nCell; + n *= pCur->apPage[i]->nCell+1; } return n; } @@ -80246,7 +81156,12 @@ static int balance_nonroot( ** of the right-most new sibling page is set to the value that was ** originally in the same field of the right-most old sibling page. */ if( (pageFlags & PTF_LEAF)==0 && nOld!=nNew ){ - MemPage *pOld = (nNew>nOld ? 
apNew : apOld)[nOld-1]; + MemPage *pOld; + if( nNew>nOld ){ + pOld = apNew[nOld-1]; + }else{ + pOld = apOld[nOld-1]; + } memcpy(&apNew[nNew-1]->aData[8], &pOld->aData[8], 4); } @@ -82878,6 +83793,7 @@ SQLITE_PRIVATE void *sqlite3BtreeSchema(Btree *p, int nBytes, void(*xFree)(void */ SQLITE_PRIVATE int sqlite3BtreeSchemaLocked(Btree *p){ int rc; + UNUSED_PARAMETER(p); /* only used in DEBUG builds */ assert( sqlite3_mutex_held(p->db->mutex) ); sqlite3BtreeEnter(p); rc = querySharedCacheTableLock(p, SCHEMA_ROOT, READ_LOCK); @@ -85063,6 +85979,7 @@ SQLITE_PRIVATE int sqlite3VdbeMemSetStr( if( sqlite3VdbeMemClearAndResize(pMem, (int)MAX(nAlloc,32)) ){ return SQLITE_NOMEM_BKPT; } + assert( pMem->z!=0 ); memcpy(pMem->z, z, nAlloc); }else{ sqlite3VdbeMemRelease(pMem); @@ -88890,10 +89807,12 @@ static int vdbeCommit(sqlite3 *db, Vdbe *p){ if( 0==sqlite3Strlen30(sqlite3BtreeGetFilename(db->aDb[0].pBt)) || nTrans<=1 ){ - for(i=0; rc==SQLITE_OK && inDb; i++){ - Btree *pBt = db->aDb[i].pBt; - if( pBt ){ - rc = sqlite3BtreeCommitPhaseOne(pBt, 0); + if( needXcommit ){ + for(i=0; rc==SQLITE_OK && inDb; i++){ + Btree *pBt = db->aDb[i].pBt; + if( sqlite3BtreeTxnState(pBt)>=SQLITE_TXN_WRITE ){ + rc = sqlite3BtreeCommitPhaseOne(pBt, 0); + } } } @@ -88904,7 +89823,9 @@ static int vdbeCommit(sqlite3 *db, Vdbe *p){ */ for(i=0; rc==SQLITE_OK && inDb; i++){ Btree *pBt = db->aDb[i].pBt; - if( pBt ){ + int txn = sqlite3BtreeTxnState(pBt); + if( txn!=SQLITE_TXN_NONE ){ + assert( needXcommit || txn==SQLITE_TXN_READ ); rc = sqlite3BtreeCommitPhaseTwo(pBt, 0); } } @@ -89159,28 +90080,31 @@ SQLITE_PRIVATE int sqlite3VdbeCloseStatement(Vdbe *p, int eOp){ /* -** This function is called when a transaction opened by the database +** These functions are called when a transaction opened by the database ** handle associated with the VM passed as an argument is about to be -** committed. If there are outstanding deferred foreign key constraint -** violations, return SQLITE_ERROR. Otherwise, SQLITE_OK. 
+** committed. If there are outstanding foreign key constraint violations +** return an error code. Otherwise, SQLITE_OK. ** ** If there are outstanding FK violations and this function returns -** SQLITE_ERROR, set the result of the VM to SQLITE_CONSTRAINT_FOREIGNKEY -** and write an error message to it. Then return SQLITE_ERROR. +** non-zero, set the result of the VM to SQLITE_CONSTRAINT_FOREIGNKEY +** and write an error message to it. */ #ifndef SQLITE_OMIT_FOREIGN_KEY -SQLITE_PRIVATE int sqlite3VdbeCheckFk(Vdbe *p, int deferred){ +static SQLITE_NOINLINE int vdbeFkError(Vdbe *p){ + p->rc = SQLITE_CONSTRAINT_FOREIGNKEY; + p->errorAction = OE_Abort; + sqlite3VdbeError(p, "FOREIGN KEY constraint failed"); + if( (p->prepFlags & SQLITE_PREPARE_SAVESQL)==0 ) return SQLITE_ERROR; + return SQLITE_CONSTRAINT_FOREIGNKEY; +} +SQLITE_PRIVATE int sqlite3VdbeCheckFkImmediate(Vdbe *p){ + if( p->nFkConstraint==0 ) return SQLITE_OK; + return vdbeFkError(p); +} +SQLITE_PRIVATE int sqlite3VdbeCheckFkDeferred(Vdbe *p){ sqlite3 *db = p->db; - if( (deferred && (db->nDeferredCons+db->nDeferredImmCons)>0) - || (!deferred && p->nFkConstraint>0) - ){ - p->rc = SQLITE_CONSTRAINT_FOREIGNKEY; - p->errorAction = OE_Abort; - sqlite3VdbeError(p, "FOREIGN KEY constraint failed"); - if( (p->prepFlags & SQLITE_PREPARE_SAVESQL)==0 ) return SQLITE_ERROR; - return SQLITE_CONSTRAINT_FOREIGNKEY; - } - return SQLITE_OK; + if( (db->nDeferredCons+db->nDeferredImmCons)==0 ) return SQLITE_OK; + return vdbeFkError(p); } #endif @@ -89274,7 +90198,7 @@ SQLITE_PRIVATE int sqlite3VdbeHalt(Vdbe *p){ /* Check for immediate foreign key violations. 
*/ if( p->rc==SQLITE_OK || (p->errorAction==OE_Fail && !isSpecialError) ){ - (void)sqlite3VdbeCheckFk(p, 0); + (void)sqlite3VdbeCheckFkImmediate(p); } /* If the auto-commit flag is set and this is the only active writer @@ -89288,7 +90212,7 @@ SQLITE_PRIVATE int sqlite3VdbeHalt(Vdbe *p){ && db->nVdbeWrite==(p->readOnly==0) ){ if( p->rc==SQLITE_OK || (p->errorAction==OE_Fail && !isSpecialError) ){ - rc = sqlite3VdbeCheckFk(p, 1); + rc = sqlite3VdbeCheckFkDeferred(p); if( rc!=SQLITE_OK ){ if( NEVER(p->readOnly) ){ sqlite3VdbeLeave(p); @@ -90098,30 +91022,22 @@ SQLITE_PRIVATE void sqlite3VdbeSerialGet( return; } /* -** This routine is used to allocate sufficient space for an UnpackedRecord -** structure large enough to be used with sqlite3VdbeRecordUnpack() if -** the first argument is a pointer to KeyInfo structure pKeyInfo. +** Allocate sufficient space for an UnpackedRecord structure large enough +** to hold a decoded index record for pKeyInfo. ** -** The space is either allocated using sqlite3DbMallocRaw() or from within -** the unaligned buffer passed via the second and third arguments (presumably -** stack space). If the former, then *ppFree is set to a pointer that should -** be eventually freed by the caller using sqlite3DbFree(). Or, if the -** allocation comes from the pSpace/szSpace buffer, *ppFree is set to NULL -** before returning. -** -** If an OOM error occurs, NULL is returned. +** The space is allocated using sqlite3DbMallocRaw(). If an OOM error +** occurs, NULL is returned. 
*/ SQLITE_PRIVATE UnpackedRecord *sqlite3VdbeAllocUnpackedRecord( KeyInfo *pKeyInfo /* Description of the record */ ){ UnpackedRecord *p; /* Unpacked record to return */ - int nByte; /* Number of bytes required for *p */ + u64 nByte; /* Number of bytes required for *p */ assert( sizeof(UnpackedRecord) + sizeof(Mem)*65536 < 0x7fffffff ); nByte = ROUND8P(sizeof(UnpackedRecord)) + sizeof(Mem)*(pKeyInfo->nKeyField+1); p = (UnpackedRecord *)sqlite3DbMallocRaw(pKeyInfo->db, nByte); if( !p ) return 0; p->aMem = (Mem*)&((char*)p)[ROUND8P(sizeof(UnpackedRecord))]; - assert( pKeyInfo->aSortFlags!=0 ); p->pKeyInfo = pKeyInfo; p->nField = pKeyInfo->nKeyField + 1; return p; @@ -90133,7 +91049,6 @@ SQLITE_PRIVATE UnpackedRecord *sqlite3VdbeAllocUnpackedRecord( ** contents of the decoded record. */ SQLITE_PRIVATE void sqlite3VdbeRecordUnpack( - KeyInfo *pKeyInfo, /* Information about the record format */ int nKey, /* Size of the binary record */ const void *pKey, /* The binary record */ UnpackedRecord *p /* Populate this structure before returning. */ @@ -90144,6 +91059,7 @@ SQLITE_PRIVATE void sqlite3VdbeRecordUnpack( u16 u; /* Unsigned loop counter */ u32 szHdr; Mem *pMem = p->aMem; + KeyInfo *pKeyInfo = p->pKeyInfo; p->default_rc = 0; assert( EIGHT_BYTE_ALIGNMENT(pMem) ); @@ -90161,16 +91077,18 @@ SQLITE_PRIVATE void sqlite3VdbeRecordUnpack( pMem->z = 0; sqlite3VdbeSerialGet(&aKey[d], serial_type, pMem); d += sqlite3VdbeSerialTypeLen(serial_type); - pMem++; if( (++u)>=p->nField ) break; + pMem++; } if( d>(u32)nKey && u ){ assert( CORRUPT_DB ); /* In a corrupt record entry, the last pMem might have been set up using ** uninitialized memory. Overwrite its value with NULL, to prevent ** warnings from MSAN. 
*/ - sqlite3VdbeMemSetNull(pMem-1); + sqlite3VdbeMemSetNull(pMem-(unField)); } + testcase( u == pKeyInfo->nKeyField + 1 ); + testcase( u < pKeyInfo->nKeyField + 1 ); assert( u<=pKeyInfo->nKeyField + 1 ); p->nField = u; } @@ -90338,6 +91256,32 @@ static void vdbeAssertFieldCountWithinLimits( ** or positive value if *pMem1 is less than, equal to or greater than ** *pMem2, respectively. Similar in spirit to "rc = (*pMem1) - (*pMem2);". */ +static SQLITE_NOINLINE int vdbeCompareMemStringWithEncodingChange( + const Mem *pMem1, + const Mem *pMem2, + const CollSeq *pColl, + u8 *prcErr /* If an OOM occurs, set to SQLITE_NOMEM */ +){ + int rc; + const void *v1, *v2; + Mem c1; + Mem c2; + sqlite3VdbeMemInit(&c1, pMem1->db, MEM_Null); + sqlite3VdbeMemInit(&c2, pMem1->db, MEM_Null); + sqlite3VdbeMemShallowCopy(&c1, pMem1, MEM_Ephem); + sqlite3VdbeMemShallowCopy(&c2, pMem2, MEM_Ephem); + v1 = sqlite3ValueText((sqlite3_value*)&c1, pColl->enc); + v2 = sqlite3ValueText((sqlite3_value*)&c2, pColl->enc); + if( (v1==0 || v2==0) ){ + if( prcErr ) *prcErr = SQLITE_NOMEM_BKPT; + rc = 0; + }else{ + rc = pColl->xCmp(pColl->pUser, c1.n, v1, c2.n, v2); + } + sqlite3VdbeMemReleaseMalloc(&c1); + sqlite3VdbeMemReleaseMalloc(&c2); + return rc; +} static int vdbeCompareMemString( const Mem *pMem1, const Mem *pMem2, @@ -90349,25 +91293,7 @@ static int vdbeCompareMemString( ** comparison function directly */ return pColl->xCmp(pColl->pUser,pMem1->n,pMem1->z,pMem2->n,pMem2->z); }else{ - int rc; - const void *v1, *v2; - Mem c1; - Mem c2; - sqlite3VdbeMemInit(&c1, pMem1->db, MEM_Null); - sqlite3VdbeMemInit(&c2, pMem1->db, MEM_Null); - sqlite3VdbeMemShallowCopy(&c1, pMem1, MEM_Ephem); - sqlite3VdbeMemShallowCopy(&c2, pMem2, MEM_Ephem); - v1 = sqlite3ValueText((sqlite3_value*)&c1, pColl->enc); - v2 = sqlite3ValueText((sqlite3_value*)&c2, pColl->enc); - if( (v1==0 || v2==0) ){ - if( prcErr ) *prcErr = SQLITE_NOMEM_BKPT; - rc = 0; - }else{ - rc = pColl->xCmp(pColl->pUser, c1.n, v1, c2.n, v2); - } - 
sqlite3VdbeMemReleaseMalloc(&c1); - sqlite3VdbeMemReleaseMalloc(&c2); - return rc; + return vdbeCompareMemStringWithEncodingChange(pMem1,pMem2,pColl,prcErr); } } @@ -91030,6 +91956,7 @@ SQLITE_PRIVATE RecordCompare sqlite3VdbeFindCompare(UnpackedRecord *p){ ** The easiest way to enforce this limit is to consider only records with ** 13 fields or less. If the first field is an integer, the maximum legal ** header size is (12*5 + 1 + 1) bytes. */ + assert( p->pKeyInfo->aSortFlags!=0 ); if( p->pKeyInfo->nAllField<=13 ){ int flags = p->aMem[0].flags; if( p->pKeyInfo->aSortFlags[0] ){ @@ -91279,6 +92206,7 @@ SQLITE_PRIVATE void sqlite3VdbeSetVarmask(Vdbe *v, int iVar){ } } +#ifndef SQLITE_OMIT_DATETIME_FUNCS /* ** Cause a function to throw an error if it was call from OP_PureFunc ** rather than OP_Function. @@ -91312,6 +92240,7 @@ SQLITE_PRIVATE int sqlite3NotPureFunc(sqlite3_context *pCtx){ } return 1; } +#endif /* SQLITE_OMIT_DATETIME_FUNCS */ #if defined(SQLITE_ENABLE_CURSOR_HINTS) && defined(SQLITE_DEBUG) /* @@ -91388,7 +92317,6 @@ SQLITE_PRIVATE void sqlite3VdbePreUpdateHook( i64 iKey2; PreUpdate preupdate; const char *zTbl = pTab->zName; - static const u8 fakeSortOrder = 0; #ifdef SQLITE_DEBUG int nRealCol; if( pTab->tabFlags & TF_WithoutRowid ){ @@ -91423,11 +92351,11 @@ SQLITE_PRIVATE void sqlite3VdbePreUpdateHook( preupdate.pCsr = pCsr; preupdate.op = op; preupdate.iNewReg = iReg; - preupdate.pKeyinfo = (KeyInfo*)&preupdate.keyinfoSpace; + preupdate.pKeyinfo = (KeyInfo*)&preupdate.uKey; preupdate.pKeyinfo->db = db; preupdate.pKeyinfo->enc = ENC(db); preupdate.pKeyinfo->nKeyField = pTab->nCol; - preupdate.pKeyinfo->aSortFlags = (u8*)&fakeSortOrder; + preupdate.pKeyinfo->aSortFlags = 0; /* Indicate .aColl, .nAllField uninit */ preupdate.iKey1 = iKey1; preupdate.iKey2 = iKey2; preupdate.pTab = pTab; @@ -91457,6 +92385,17 @@ SQLITE_PRIVATE void sqlite3VdbePreUpdateHook( } #endif /* SQLITE_ENABLE_PREUPDATE_HOOK */ +#ifdef SQLITE_ENABLE_PERCENTILE +/* +** Return the 
name of an SQL function associated with the sqlite3_context. +*/ +SQLITE_PRIVATE const char *sqlite3VdbeFuncName(const sqlite3_context *pCtx){ + assert( pCtx!=0 ); + assert( pCtx->pFunc!=0 ); + return pCtx->pFunc->zName; +} +#endif /* SQLITE_ENABLE_PERCENTILE */ + /************** End of vdbeaux.c *********************************************/ /************** Begin file vdbeapi.c *****************************************/ /* @@ -93154,8 +94093,12 @@ static int bindText( if( zData!=0 ){ pVar = &p->aVar[i-1]; rc = sqlite3VdbeMemSetStr(pVar, zData, nData, encoding, xDel); - if( rc==SQLITE_OK && encoding!=0 ){ - rc = sqlite3VdbeChangeEncoding(pVar, ENC(p->db)); + if( rc==SQLITE_OK ){ + if( encoding==0 ){ + pVar->enc = ENC(p->db); + }else{ + rc = sqlite3VdbeChangeEncoding(pVar, ENC(p->db)); + } } if( rc ){ sqlite3Error(p->db, rc); @@ -93624,7 +94567,7 @@ static UnpackedRecord *vdbeUnpackRecord( pRet = sqlite3VdbeAllocUnpackedRecord(pKeyInfo); if( pRet ){ memset(pRet->aMem, 0, sizeof(Mem)*(pKeyInfo->nKeyField+1)); - sqlite3VdbeRecordUnpack(pKeyInfo, nKey, pKey, pRet); + sqlite3VdbeRecordUnpack(nKey, pKey, pRet); } return pRet; } @@ -93653,6 +94596,9 @@ SQLITE_API int sqlite3_preupdate_old(sqlite3 *db, int iIdx, sqlite3_value **ppVa } if( p->pPk ){ iStore = sqlite3TableColumnToIndex(p->pPk, iIdx); + }else if( iIdx >= p->pTab->nCol ){ + rc = SQLITE_MISUSE_BKPT; + goto preupdate_old_out; }else{ iStore = sqlite3TableColumnToStorage(p->pTab, iIdx); } @@ -93808,6 +94754,8 @@ SQLITE_API int sqlite3_preupdate_new(sqlite3 *db, int iIdx, sqlite3_value **ppVa } if( p->pPk && p->op!=SQLITE_UPDATE ){ iStore = sqlite3TableColumnToIndex(p->pPk, iIdx); + }else if( iIdx >= p->pTab->nCol ){ + return SQLITE_MISUSE_BKPT; }else{ iStore = sqlite3TableColumnToStorage(p->pTab, iIdx); } @@ -94083,10 +95031,10 @@ SQLITE_API void sqlite3_stmt_scanstatus_reset(sqlite3_stmt *pStmt){ ** a host parameter. If the text contains no host parameters, return ** the total number of bytes in the text. 
*/ -static int findNextHostParameter(const char *zSql, int *pnToken){ +static i64 findNextHostParameter(const char *zSql, i64 *pnToken){ int tokenType; - int nTotal = 0; - int n; + i64 nTotal = 0; + i64 n; *pnToken = 0; while( zSql[0] ){ @@ -94133,8 +95081,8 @@ SQLITE_PRIVATE char *sqlite3VdbeExpandSql( sqlite3 *db; /* The database connection */ int idx = 0; /* Index of a host parameter */ int nextIndex = 1; /* Index of next ? host parameter */ - int n; /* Length of a token prefix */ - int nToken; /* Length of the parameter token */ + i64 n; /* Length of a token prefix */ + i64 nToken; /* Length of the parameter token */ int i; /* Loop counter */ Mem *pVar; /* Value of a host parameter */ StrAccum out; /* Accumulate the output here */ @@ -95058,7 +96006,7 @@ static u64 filterHash(const Mem *aMem, const Op *pOp){ static SQLITE_NOINLINE int vdbeColumnFromOverflow( VdbeCursor *pC, /* The BTree cursor from which we are reading */ int iCol, /* The column to read */ - int t, /* The serial-type code for the column value */ + u32 t, /* The serial-type code for the column value */ i64 iOffset, /* Offset to the start of the content value */ u32 cacheStatus, /* Current Vdbe.cacheCtr value */ u32 colCacheCtr, /* Current value of the column cache counter */ @@ -95133,6 +96081,36 @@ static SQLITE_NOINLINE int vdbeColumnFromOverflow( return rc; } +/* +** Send a "statement aborts" message to the error log. 
+*/ +static SQLITE_NOINLINE void sqlite3VdbeLogAbort( + Vdbe *p, /* The statement that is running at the time of failure */ + int rc, /* Error code */ + Op *pOp, /* Opcode that filed */ + Op *aOp /* All opcodes */ +){ + const char *zSql = p->zSql; /* Original SQL text */ + const char *zPrefix = ""; /* Prefix added to SQL text */ + int pc; /* Opcode address */ + char zXtra[100]; /* Buffer space to store zPrefix */ + + if( p->pFrame ){ + assert( aOp[0].opcode==OP_Init ); + if( aOp[0].p4.z!=0 ){ + assert( aOp[0].p4.z[0]=='-' + && aOp[0].p4.z[1]=='-' + && aOp[0].p4.z[2]==' ' ); + sqlite3_snprintf(sizeof(zXtra), zXtra,"/* %s */ ",aOp[0].p4.z+3); + zPrefix = zXtra; + }else{ + zPrefix = "/* unknown trigger */ "; + } + } + pc = (int)(pOp - aOp); + sqlite3_log(rc, "statement aborts at %d: %s; [%s%s]", + pc, p->zErrMsg, zPrefix, zSql); +} /* ** Return the symbolic name for the data type of a pMem @@ -95658,8 +96636,7 @@ case OP_Halt: { }else{ sqlite3VdbeError(p, "%s", pOp->p4.z); } - pcx = (int)(pOp - aOp); - sqlite3_log(pOp->p1, "abort at %d: %s; [%s]", pcx, p->zErrMsg, p->zSql); + sqlite3VdbeLogAbort(p, pOp->p1, pOp, aOp); } rc = sqlite3VdbeHalt(p); assert( rc==SQLITE_BUSY || rc==SQLITE_OK || rc==SQLITE_ERROR ); @@ -96038,7 +97015,7 @@ case OP_IntCopy: { /* out2 */ ** RETURNING clause. 
*/ case OP_FkCheck: { - if( (rc = sqlite3VdbeCheckFk(p,0))!=SQLITE_OK ){ + if( (rc = sqlite3VdbeCheckFkImmediate(p))!=SQLITE_OK ){ goto abort_due_to_error; } break; @@ -96130,10 +97107,14 @@ case OP_Concat: { /* same as TK_CONCAT, in1, in2, out3 */ if( sqlite3VdbeMemExpandBlob(pIn2) ) goto no_mem; flags2 = pIn2->flags & ~MEM_Str; } - nByte = pIn1->n + pIn2->n; + nByte = pIn1->n; + nByte += pIn2->n; if( nByte>db->aLimit[SQLITE_LIMIT_LENGTH] ){ goto too_big; } +#if SQLITE_MAX_LENGTH>2147483645 + if( nByte>2147483645 ){ goto too_big; } +#endif if( sqlite3VdbeMemGrow(pOut, (int)nByte+2, pOut==pIn2) ){ goto no_mem; } @@ -96817,6 +97798,7 @@ case OP_Compare: { pKeyInfo = pOp->p4.pKeyInfo; assert( n>0 ); assert( pKeyInfo!=0 ); + assert( pKeyInfo->aSortFlags!=0 ); p1 = pOp->p1; p2 = pOp->p2; #ifdef SQLITE_DEBUG @@ -97579,6 +98561,15 @@ op_column_corrupt: ** Take the affinities from the Table object in P4. If any value ** cannot be coerced into the correct type, then raise an error. ** +** If P3==0, then omit checking of VIRTUAL columns. +** +** If P3==1, then omit checking of all generated column, both VIRTUAL +** and STORED. +** +** If P3>=2, then only check column number P3-2 in the table (which will +** be a VIRTUAL column) against the value in reg[P1]. In this case, +** P2 will be 1. +** ** This opcode is similar to OP_Affinity except that this opcode ** forces the register type to the Table column type. This is used ** to implement "strict affinity". @@ -97592,8 +98583,8 @@ op_column_corrupt: ** **
      **
    • P2 should be the number of non-virtual columns in the -** table of P4. -**
    • Table P4 should be a STRICT table. +** table of P4 unless P3>1, in which case P2 will be 1. +**
    • Table P4 is a STRICT table. **
    ** ** If any precondition is false, an assertion fault occurs. @@ -97602,16 +98593,28 @@ case OP_TypeCheck: { Table *pTab; Column *aCol; int i; + int nCol; assert( pOp->p4type==P4_TABLE ); pTab = pOp->p4.pTab; assert( pTab->tabFlags & TF_Strict ); - assert( pTab->nNVCol==pOp->p2 ); + assert( pOp->p3>=0 && pOp->p3nCol+2 ); aCol = pTab->aCol; pIn1 = &aMem[pOp->p1]; - for(i=0; inCol; i++){ - if( aCol[i].colFlags & COLFLAG_GENERATED ){ - if( aCol[i].colFlags & COLFLAG_VIRTUAL ) continue; + if( pOp->p3<2 ){ + assert( pTab->nNVCol==pOp->p2 ); + i = 0; + nCol = pTab->nCol; + }else{ + i = pOp->p3-2; + nCol = i+1; + assert( inCol ); + assert( aCol[i].colFlags & COLFLAG_VIRTUAL ); + assert( pOp->p2==1 ); + } + for(; ip3<2 ){ + if( (aCol[i].colFlags & COLFLAG_VIRTUAL)!=0 ) continue; if( pOp->p3 ){ pIn1++; continue; } } assert( pIn1 < &aMem[pOp->p1+pOp->p2] ); @@ -97933,7 +98936,7 @@ case OP_MakeRecord: { len = (u32)pRec->n; serial_type = (len*2) + 12 + ((pRec->flags & MEM_Str)!=0); if( pRec->flags & MEM_Zero ){ - serial_type += pRec->u.nZero*2; + serial_type += (u32)pRec->u.nZero*2; if( nData ){ if( sqlite3VdbeMemExpandBlob(pRec) ) goto no_mem; len += pRec->u.nZero; @@ -98200,7 +99203,7 @@ case OP_Savepoint: { */ int isTransaction = pSavepoint->pNext==0 && db->isTransactionSavepoint; if( isTransaction && p1==SAVEPOINT_RELEASE ){ - if( (rc = sqlite3VdbeCheckFk(p, 1))!=SQLITE_OK ){ + if( (rc = sqlite3VdbeCheckFkDeferred(p))!=SQLITE_OK ){ goto vdbe_return; } db->autoCommit = 1; @@ -98318,7 +99321,7 @@ case OP_AutoCommit: { "SQL statements in progress"); rc = SQLITE_BUSY; goto abort_due_to_error; - }else if( (rc = sqlite3VdbeCheckFk(p, 1))!=SQLITE_OK ){ + }else if( (rc = sqlite3VdbeCheckFkDeferred(p))!=SQLITE_OK ){ goto vdbe_return; }else{ db->autoCommit = (u8)desiredAutoCommit; @@ -99690,7 +100693,7 @@ case OP_Found: { /* jump, in3, ncycle */ if( rc ) goto no_mem; pIdxKey = sqlite3VdbeAllocUnpackedRecord(pC->pKeyInfo); if( pIdxKey==0 ) goto no_mem; - 
sqlite3VdbeRecordUnpack(pC->pKeyInfo, r.aMem->n, r.aMem->z, pIdxKey); + sqlite3VdbeRecordUnpack(r.aMem->n, r.aMem->z, pIdxKey); pIdxKey->default_rc = 0; rc = sqlite3BtreeIndexMoveto(pC->uc.pCursor, pIdxKey, &pC->seekResult); sqlite3DbFreeNN(db, pIdxKey); @@ -100688,6 +101691,32 @@ case OP_Rewind: { /* jump0, ncycle */ break; } +/* Opcode: IfEmpty P1 P2 * * * +** Synopsis: if( empty(P1) ) goto P2 +** +** Check to see if the b-tree table that cursor P1 references is empty +** and jump to P2 if it is. +*/ +case OP_IfEmpty: { /* jump */ + VdbeCursor *pC; + BtCursor *pCrsr; + int res; + + assert( pOp->p1>=0 && pOp->p1nCursor ); + assert( pOp->p2>=0 && pOp->p2nOp ); + + pC = p->apCsr[pOp->p1]; + assert( pC!=0 ); + assert( pC->eCurType==CURTYPE_BTREE ); + pCrsr = pC->uc.pCursor; + assert( pCrsr ); + rc = sqlite3BtreeIsEmpty(pCrsr, &res); + if( rc ) goto abort_due_to_error; + VdbeBranchTaken(res!=0,2); + if( res ) goto jump_to_p2; + break; +} + /* Opcode: Next P1 P2 P3 * P5 ** ** Advance cursor P1 so that it points to the next key/data pair in its @@ -102224,6 +103253,7 @@ case OP_Checkpoint: { || pOp->p2==SQLITE_CHECKPOINT_FULL || pOp->p2==SQLITE_CHECKPOINT_RESTART || pOp->p2==SQLITE_CHECKPOINT_TRUNCATE + || pOp->p2==SQLITE_CHECKPOINT_NOOP ); rc = sqlite3Checkpoint(db, pOp->p1, pOp->p2, &aRes[1], &aRes[2]); if( rc ){ @@ -102559,7 +103589,14 @@ case OP_VOpen: { /* ncycle */ const sqlite3_module *pModule; assert( p->bIsReader ); - pCur = 0; + pCur = p->apCsr[pOp->p1]; + if( pCur!=0 + && ALWAYS( pCur->eCurType==CURTYPE_VTAB ) + && ALWAYS( pCur->uc.pVCur->pVtab==pOp->p4.pVtab->pVtab ) + ){ + /* This opcode is a no-op if the cursor is already open */ + break; + } pVCur = 0; pVtab = pOp->p4.pVtab->pVtab; if( pVtab==0 || NEVER(pVtab->pModule==0) ){ @@ -103501,8 +104538,7 @@ abort_due_to_error: p->rc = rc; sqlite3SystemError(db, rc); testcase( sqlite3GlobalConfig.xLog!=0 ); - sqlite3_log(rc, "statement aborts at %d: %s; [%s]", - (int)(pOp - aOp), p->zErrMsg, p->zSql); + 
sqlite3VdbeLogAbort(p, rc, pOp, aOp); if( p->eVdbeState==VDBE_RUN_STATE ) sqlite3VdbeHalt(p); if( rc==SQLITE_IOERR_NOMEM ) sqlite3OomFault(db); if( rc==SQLITE_CORRUPT && db->autoCommit==0 ){ @@ -103963,7 +104999,7 @@ static int blobReadWrite( int iOffset, int (*xCall)(BtCursor*, u32, u32, void*) ){ - int rc; + int rc = SQLITE_OK; Incrblob *p = (Incrblob *)pBlob; Vdbe *v; sqlite3 *db; @@ -104003,17 +105039,32 @@ static int blobReadWrite( ** using the incremental-blob API, this works. For the sessions module ** anyhow. */ - sqlite3_int64 iKey; - iKey = sqlite3BtreeIntegerKey(p->pCsr); - assert( v->apCsr[0]!=0 ); - assert( v->apCsr[0]->eCurType==CURTYPE_BTREE ); - sqlite3VdbePreUpdateHook( - v, v->apCsr[0], SQLITE_DELETE, p->zDb, p->pTab, iKey, -1, p->iCol - ); + if( sqlite3BtreeCursorIsValidNN(p->pCsr)==0 ){ + /* If the cursor is not currently valid, try to reseek it. This + ** always either fails or finds the correct row - the cursor will + ** have been marked permanently CURSOR_INVALID if the open row has + ** been deleted. 
*/ + int bDiff = 0; + rc = sqlite3BtreeCursorRestore(p->pCsr, &bDiff); + assert( bDiff==0 || sqlite3BtreeCursorIsValidNN(p->pCsr)==0 ); + } + if( sqlite3BtreeCursorIsValidNN(p->pCsr) ){ + sqlite3_int64 iKey; + iKey = sqlite3BtreeIntegerKey(p->pCsr); + assert( v->apCsr[0]!=0 ); + assert( v->apCsr[0]->eCurType==CURTYPE_BTREE ); + sqlite3VdbePreUpdateHook( + v, v->apCsr[0], SQLITE_DELETE, p->zDb, p->pTab, iKey, -1, p->iCol + ); + } } + if( rc==SQLITE_OK ){ + rc = xCall(p->pCsr, iOffset+p->iOffset, n, z); + } +#else + rc = xCall(p->pCsr, iOffset+p->iOffset, n, z); #endif - rc = xCall(p->pCsr, iOffset+p->iOffset, n, z); sqlite3BtreeLeaveCursor(p->pCsr); if( rc==SQLITE_ABORT ){ sqlite3VdbeFinalize(v); @@ -104402,6 +105453,7 @@ struct SortSubtask { SorterCompare xCompare; /* Compare function to use */ SorterFile file; /* Temp file for level-0 PMAs */ SorterFile file2; /* Space for other PMAs */ + u64 nSpill; /* Total bytes written by this task */ }; @@ -104522,6 +105574,7 @@ struct PmaWriter { int iBufEnd; /* Last byte of buffer to write */ i64 iWriteOff; /* Offset of start of buffer in file */ sqlite3_file *pFd; /* File handle to write to */ + u64 nPmaSpill; /* Total number of bytes written */ }; /* @@ -104866,7 +105919,7 @@ static int vdbeSorterCompareTail( ){ UnpackedRecord *r2 = pTask->pUnpacked; if( *pbKey2Cached==0 ){ - sqlite3VdbeRecordUnpack(pTask->pSorter->pKeyInfo, nKey2, pKey2, r2); + sqlite3VdbeRecordUnpack(nKey2, pKey2, r2); *pbKey2Cached = 1; } return sqlite3VdbeRecordCompareWithSkip(nKey1, pKey1, r2, 1); @@ -104893,7 +105946,7 @@ static int vdbeSorterCompare( ){ UnpackedRecord *r2 = pTask->pUnpacked; if( !*pbKey2Cached ){ - sqlite3VdbeRecordUnpack(pTask->pSorter->pKeyInfo, nKey2, pKey2, r2); + sqlite3VdbeRecordUnpack(nKey2, pKey2, r2); *pbKey2Cached = 1; } return sqlite3VdbeRecordCompare(nKey1, pKey1, r2); @@ -104933,6 +105986,7 @@ static int vdbeSorterCompareText( ); } }else{ + assert( pTask->pSorter->pKeyInfo->aSortFlags!=0 ); assert( 
!(pTask->pSorter->pKeyInfo->aSortFlags[0]&KEYINFO_ORDER_BIGNULL) ); if( pTask->pSorter->pKeyInfo->aSortFlags[0] ){ res = res * -1; @@ -104996,6 +106050,7 @@ static int vdbeSorterCompareInt( } } + assert( pTask->pSorter->pKeyInfo->aSortFlags!=0 ); if( res==0 ){ if( pTask->pSorter->pKeyInfo->nKeyField>1 ){ res = vdbeSorterCompareTail( @@ -105069,7 +106124,8 @@ SQLITE_PRIVATE int sqlite3VdbeSorterInit( assert( pCsr->eCurType==CURTYPE_SORTER ); assert( sizeof(KeyInfo) + UMXV(pCsr->pKeyInfo->nKeyField)*sizeof(CollSeq*) < 0x7fffffff ); - szKeyInfo = SZ_KEYINFO(pCsr->pKeyInfo->nKeyField); + assert( pCsr->pKeyInfo->nKeyField<=pCsr->pKeyInfo->nAllField ); + szKeyInfo = SZ_KEYINFO(pCsr->pKeyInfo->nAllField); sz = SZ_VDBESORTER(nWorker+1); pSorter = (VdbeSorter*)sqlite3DbMallocZero(db, sz + szKeyInfo); @@ -105083,7 +106139,12 @@ SQLITE_PRIVATE int sqlite3VdbeSorterInit( pKeyInfo->db = 0; if( nField && nWorker==0 ){ pKeyInfo->nKeyField = nField; + assert( nField<=pCsr->pKeyInfo->nAllField ); } + /* It is OK that pKeyInfo reuses the aSortFlags field from pCsr->pKeyInfo, + ** since the pCsr->pKeyInfo->aSortFlags[] array is invariant and lives + ** longer that pSorter. */ + assert( pKeyInfo->aSortFlags==pCsr->pKeyInfo->aSortFlags ); sqlite3BtreeEnter(pBt); pSorter->pgsz = pgsz = sqlite3BtreeGetPageSize(pBt); sqlite3BtreeLeave(pBt); @@ -105372,6 +106433,12 @@ SQLITE_PRIVATE void sqlite3VdbeSorterClose(sqlite3 *db, VdbeCursor *pCsr){ assert( pCsr->eCurType==CURTYPE_SORTER ); pSorter = pCsr->uc.pSorter; if( pSorter ){ + /* Increment db->nSpill by the total number of bytes of data written + ** to temp files by this sort operation. 
*/ + int ii; + for(ii=0; iinTask; ii++){ + db->nSpill += pSorter->aTask[ii].nSpill; + } sqlite3VdbeSorterReset(db, pSorter); sqlite3_free(pSorter->list.aMemory); sqlite3DbFree(db, pSorter); @@ -105597,6 +106664,7 @@ static void vdbePmaWriteBlob(PmaWriter *p, u8 *pData, int nData){ &p->aBuffer[p->iBufStart], p->iBufEnd - p->iBufStart, p->iWriteOff + p->iBufStart ); + p->nPmaSpill += (p->iBufEnd - p->iBufStart); p->iBufStart = p->iBufEnd = 0; p->iWriteOff += p->nBuffer; } @@ -105613,17 +106681,20 @@ static void vdbePmaWriteBlob(PmaWriter *p, u8 *pData, int nData){ ** required. Otherwise, return an SQLite error code. ** ** Before returning, set *piEof to the offset immediately following the -** last byte written to the file. +** last byte written to the file. Also, increment (*pnSpill) by the total +** number of bytes written to the file. */ -static int vdbePmaWriterFinish(PmaWriter *p, i64 *piEof){ +static int vdbePmaWriterFinish(PmaWriter *p, i64 *piEof, u64 *pnSpill){ int rc; if( p->eFWErr==0 && ALWAYS(p->aBuffer) && p->iBufEnd>p->iBufStart ){ p->eFWErr = sqlite3OsWrite(p->pFd, &p->aBuffer[p->iBufStart], p->iBufEnd - p->iBufStart, p->iWriteOff + p->iBufStart ); + p->nPmaSpill += (p->iBufEnd - p->iBufStart); } *piEof = (p->iWriteOff + p->iBufEnd); + *pnSpill += p->nPmaSpill; sqlite3_free(p->aBuffer); rc = p->eFWErr; memset(p, 0, sizeof(PmaWriter)); @@ -105703,7 +106774,7 @@ static int vdbeSorterListToPMA(SortSubtask *pTask, SorterList *pList){ if( pList->aMemory==0 ) sqlite3_free(p); } pList->pList = p; - rc = vdbePmaWriterFinish(&writer, &pTask->file.iEof); + rc = vdbePmaWriterFinish(&writer, &pTask->file.iEof, &pTask->nSpill); } vdbeSorterWorkDebug(pTask, "exit"); @@ -106017,7 +107088,7 @@ static int vdbeIncrPopulate(IncrMerger *pIncr){ rc = vdbeMergeEngineStep(pIncr->pMerger, &dummy); } - rc2 = vdbePmaWriterFinish(&writer, &pOut->iEof); + rc2 = vdbePmaWriterFinish(&writer, &pOut->iEof, &pTask->nSpill); if( rc==SQLITE_OK ) rc = rc2; vdbeSorterPopulateDebug(pTask, 
"exit"); return rc; @@ -106863,7 +107934,7 @@ SQLITE_PRIVATE int sqlite3VdbeSorterCompare( assert( r2->nField==nKeyCol ); pKey = vdbeSorterRowkey(pSorter, &nKey); - sqlite3VdbeRecordUnpack(pKeyInfo, nKey, pKey, r2); + sqlite3VdbeRecordUnpack(nKey, pKey, r2); for(i=0; iaMem[i].flags & MEM_Null ){ *pRes = -1; @@ -108408,10 +109479,13 @@ static int lookupName( if( cnt>0 ){ if( pItem->fg.isUsing==0 || sqlite3IdListIndex(pItem->u3.pUsing, zCol)<0 + || pMatch==pItem ){ /* Two or more tables have the same column name which is - ** not joined by USING. This is an error. Signal as much - ** by clearing pFJMatch and letting cnt go above 1. */ + ** not joined by USING. Or, a single table has two columns + ** that match a USING term (if pMatch==pItem). These are both + ** "ambiguous column name" errors. Signal as much by clearing + ** pFJMatch and letting cnt go above 1. */ sqlite3ExprListDelete(db, pFJMatch); pFJMatch = 0; }else @@ -108961,8 +110035,8 @@ static void notValidImpl( /* ** Expression p should encode a floating point value between 1.0 and 0.0. -** Return 1024 times this value. Or return -1 if p is not a floating point -** value between 1.0 and 0.0. +** Return 134,217,728 (2^27) times this value. Or return -1 if p is not +** a floating point value between 1.0 and 0.0. 
*/ static int exprProbability(Expr *p){ double r = -1.0; @@ -109393,11 +110467,13 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){ return WRC_Prune; } #ifndef SQLITE_OMIT_SUBQUERY + case TK_EXISTS: case TK_SELECT: - case TK_EXISTS: testcase( pExpr->op==TK_EXISTS ); #endif case TK_IN: { testcase( pExpr->op==TK_IN ); + testcase( pExpr->op==TK_EXISTS ); + testcase( pExpr->op==TK_SELECT ); if( ExprUseXSelect(pExpr) ){ int nRef = pNC->nRef; testcase( pNC->ncFlags & NC_IsCheck ); @@ -109405,6 +110481,7 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){ testcase( pNC->ncFlags & NC_IdxExpr ); testcase( pNC->ncFlags & NC_GenCol ); assert( pExpr->x.pSelect ); + if( pExpr->op==TK_EXISTS ) pParse->bHasExists = 1; if( pNC->ncFlags & NC_SelfRef ){ notValidImpl(pParse, pNC, "subqueries", pExpr, pExpr); }else{ @@ -110315,14 +111392,17 @@ SQLITE_PRIVATE int sqlite3ResolveSelfReference( SrcList *pSrc; /* Fake SrcList for pParse->pNewTable */ NameContext sNC; /* Name context for pParse->pNewTable */ int rc; - u8 srcSpace[SZ_SRCLIST_1]; /* Memory space for the fake SrcList */ + union { + SrcList sSrc; + u8 srcSpace[SZ_SRCLIST_1]; /* Memory space for the fake SrcList */ + } uSrc; assert( type==0 || pTab!=0 ); assert( type==NC_IsCheck || type==NC_PartIdx || type==NC_IdxExpr || type==NC_GenCol || pTab==0 ); memset(&sNC, 0, sizeof(sNC)); - pSrc = (SrcList*)srcSpace; - memset(pSrc, 0, SZ_SRCLIST_1); + memset(&uSrc, 0, sizeof(uSrc)); + pSrc = &uSrc.sSrc; if( pTab ){ pSrc->nSrc = 1; pSrc->a[0].zName = pTab->zName; @@ -111585,6 +112665,11 @@ SQLITE_PRIVATE void sqlite3ExprAddFunctionOrderBy( sqlite3ExprListDelete(db, pOrderBy); return; } + if( pOrderBy->nExpr>db->aLimit[SQLITE_LIMIT_COLUMN] ){ + sqlite3ErrorMsg(pParse, "too many terms in ORDER BY clause"); + sqlite3ExprListDelete(db, pOrderBy); + return; + } pOB = sqlite3ExprAlloc(db, TK_ORDER, 0, 0); if( pOB==0 ){ @@ -112719,6 +113804,85 @@ SQLITE_PRIVATE Expr *sqlite3ExprSimplifiedAndOr(Expr *pExpr){ return pExpr; } 
+/* +** Return true if it might be advantageous to compute the right operand +** of expression pExpr first, before the left operand. +** +** Normally the left operand is computed before the right operand. But if +** the left operand contains a subquery and the right does not, then it +** might be more efficient to compute the right operand first. +*/ +static int exprEvalRhsFirst(Expr *pExpr){ + if( ExprHasProperty(pExpr->pLeft, EP_Subquery) + && !ExprHasProperty(pExpr->pRight, EP_Subquery) + ){ + return 1; + }else{ + return 0; + } +} + +/* +** Compute the two operands of a binary operator. +** +** If either operand contains a subquery, then the code strives to +** compute the operand containing the subquery second. If the other +** operand evalutes to NULL, then a jump is made. The address of the +** IsNull operand that does this jump is returned. The caller can use +** this to optimize the computation so as to avoid doing the potentially +** expensive subquery. +** +** If no optimization opportunities exist, return 0. +*/ +static int exprComputeOperands( + Parse *pParse, /* Parsing context */ + Expr *pExpr, /* The comparison expression */ + int *pR1, /* OUT: Register holding the left operand */ + int *pR2, /* OUT: Register holding the right operand */ + int *pFree1, /* OUT: Temp register to free if not zero */ + int *pFree2 /* OUT: Another temp register to free if not zero */ +){ + int addrIsNull; + int r1, r2; + Vdbe *v = pParse->pVdbe; + + assert( v!=0 ); + /* + ** If the left operand contains a (possibly expensive) subquery and the + ** right operand does not and the right operation might be NULL, + ** then compute the right operand first and do an IsNull jump if the + ** right operand evalutes to NULL. 
+ */ + if( exprEvalRhsFirst(pExpr) && sqlite3ExprCanBeNull(pExpr->pRight) ){ + r2 = sqlite3ExprCodeTemp(pParse, pExpr->pRight, pFree2); + addrIsNull = sqlite3VdbeAddOp1(v, OP_IsNull, r2); + VdbeComment((v, "skip left operand")); + VdbeCoverage(v); + }else{ + r2 = 0; /* Silence a false-positive uninit-var warning in MSVC */ + addrIsNull = 0; + } + r1 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, pFree1); + if( addrIsNull==0 ){ + /* + ** If the right operand contains a subquery and the left operand does not + ** and the left operand might be NULL, then do an IsNull check + ** check on the left operand before computing the right operand. + */ + if( ExprHasProperty(pExpr->pRight, EP_Subquery) + && sqlite3ExprCanBeNull(pExpr->pLeft) + ){ + addrIsNull = sqlite3VdbeAddOp1(v, OP_IsNull, r1); + VdbeComment((v, "skip right operand")); + VdbeCoverage(v); + } + r2 = sqlite3ExprCodeTemp(pParse, pExpr->pRight, pFree2); + } + *pR1 = r1; + *pR2 = r2; + return addrIsNull; +} + /* ** pExpr is a TK_FUNCTION node. Try to determine whether or not the ** function is a constant function. A function is constant if all of @@ -114163,17 +115327,23 @@ SQLITE_PRIVATE int sqlite3CodeSubselect(Parse *pParse, Expr *pExpr){ VdbeComment((v, "Init EXISTS result")); } if( pSel->pLimit ){ - /* The subquery already has a limit. If the pre-existing limit is X - ** then make the new limit X<>0 so that the new limit is either 1 or 0 */ - sqlite3 *db = pParse->db; - pLimit = sqlite3Expr(db, TK_INTEGER, "0"); - if( pLimit ){ - pLimit->affExpr = SQLITE_AFF_NUMERIC; - pLimit = sqlite3PExpr(pParse, TK_NE, - sqlite3ExprDup(db, pSel->pLimit->pLeft, 0), pLimit); + /* The subquery already has a limit. 
If the pre-existing limit X is + ** not already integer value 1 or 0, then make the new limit X<>0 so that + ** the new limit is either 1 or 0 */ + Expr *pLeft = pSel->pLimit->pLeft; + if( ExprHasProperty(pLeft, EP_IntValue)==0 + || (pLeft->u.iValue!=1 && pLeft->u.iValue!=0) + ){ + sqlite3 *db = pParse->db; + pLimit = sqlite3Expr(db, TK_INTEGER, "0"); + if( pLimit ){ + pLimit->affExpr = SQLITE_AFF_NUMERIC; + pLimit = sqlite3PExpr(pParse, TK_NE, + sqlite3ExprDup(db, pLeft, 0), pLimit); + } + sqlite3ExprDeferredDelete(pParse, pLeft); + pSel->pLimit->pLeft = pLimit; } - sqlite3ExprDeferredDelete(pParse, pSel->pLimit->pLeft); - pSel->pLimit->pLeft = pLimit; }else{ /* If there is no pre-existing limit add a limit of 1 */ pLimit = sqlite3Expr(pParse->db, TK_INTEGER, "1"); @@ -114261,7 +115431,6 @@ static void sqlite3ExprCodeIN( int rRhsHasNull = 0; /* Register that is true if RHS contains NULL values */ int eType; /* Type of the RHS */ int rLhs; /* Register(s) holding the LHS values */ - int rLhsOrig; /* LHS values prior to reordering by aiMap[] */ Vdbe *v; /* Statement under construction */ int *aiMap = 0; /* Map from vector field to index column */ char *zAff = 0; /* Affinity string for comparisons */ @@ -114324,19 +115493,8 @@ static void sqlite3ExprCodeIN( ** by code generated below. */ assert( pParse->okConstFactor==okConstFactor ); pParse->okConstFactor = 0; - rLhsOrig = exprCodeVector(pParse, pLeft, &iDummy); + rLhs = exprCodeVector(pParse, pLeft, &iDummy); pParse->okConstFactor = okConstFactor; - for(i=0; ix.pList; pColl = sqlite3ExprCollSeq(pParse, pExpr->pLeft); @@ -114392,6 +115551,26 @@ static void sqlite3ExprCodeIN( goto sqlite3ExprCodeIN_finished; } + if( eType!=IN_INDEX_ROWID ){ + /* If this IN operator will use an index, then the order of columns in the + ** vector might be different from the order in the index. In that case, + ** we need to reorder the LHS values to be in index order. 
Run Affinity + ** before reordering the columns, so that the affinity is correct. + */ + sqlite3VdbeAddOp4(v, OP_Affinity, rLhs, nVector, 0, zAff, nVector); + for(i=0; idb, aiMap); @@ -114615,7 +115793,12 @@ SQLITE_PRIVATE void sqlite3ExprCodeGeneratedColumn( iAddr = 0; } sqlite3ExprCodeCopy(pParse, sqlite3ColumnExpr(pTab,pCol), regOut); - if( pCol->affinity>=SQLITE_AFF_TEXT ){ + if( (pCol->colFlags & COLFLAG_VIRTUAL)!=0 + && (pTab->tabFlags & TF_Strict)!=0 + ){ + int p3 = 2+(int)(pCol - pTab->aCol); + sqlite3VdbeAddOp4(v, OP_TypeCheck, regOut, 1, p3, (char*)pTab, P4_TABLE); + }else if( pCol->affinity>=SQLITE_AFF_TEXT ){ sqlite3VdbeAddOp4(v, OP_Affinity, regOut, 1, 0, &pCol->affinity, 1); } if( iAddr ) sqlite3VdbeJumpHere(v, iAddr); @@ -115053,6 +116236,80 @@ static int exprPartidxExprLookup(Parse *pParse, Expr *pExpr, int iTarget){ return 0; } +/* +** Generate code that evaluates an AND or OR operator leaving a +** boolean result in a register. pExpr is the AND/OR expression. +** Store the result in the "target" register. Use short-circuit +** evaluation to avoid computing both operands, if possible. +** +** The code generated might require the use of a temporary register. +** If it does, then write the number of that temporary register +** into *pTmpReg. If not, leave *pTmpReg unchanged. +*/ +static SQLITE_NOINLINE int exprCodeTargetAndOr( + Parse *pParse, /* Parsing context */ + Expr *pExpr, /* AND or OR expression to be coded */ + int target, /* Put result in this register, guaranteed */ + int *pTmpReg /* Write a temporary register here */ +){ + int op; /* The opcode. 
TK_AND or TK_OR */ + int skipOp; /* Opcode for the branch that skips one operand */ + int addrSkip; /* Branch instruction that skips one of the operands */ + int regSS = 0; /* Register holding computed operand when other omitted */ + int r1, r2; /* Registers for left and right operands, respectively */ + Expr *pAlt; /* Alternative, simplified expression */ + Vdbe *v; /* statement being coded */ + + assert( pExpr!=0 ); + op = pExpr->op; + assert( op==TK_AND || op==TK_OR ); + assert( TK_AND==OP_And ); testcase( op==TK_AND ); + assert( TK_OR==OP_Or ); testcase( op==TK_OR ); + assert( pParse->pVdbe!=0 ); + v = pParse->pVdbe; + pAlt = sqlite3ExprSimplifiedAndOr(pExpr); + if( pAlt!=pExpr ){ + r1 = sqlite3ExprCodeTarget(pParse, pAlt, target); + sqlite3VdbeAddOp3(v, OP_And, r1, r1, target); + return target; + } + skipOp = op==TK_AND ? OP_IfNot : OP_If; + if( exprEvalRhsFirst(pExpr) ){ + /* Compute the right operand first. Skip the computation of the left + ** operand if the right operand fully determines the result */ + r2 = regSS = sqlite3ExprCodeTarget(pParse, pExpr->pRight, target); + addrSkip = sqlite3VdbeAddOp1(v, skipOp, r2); + VdbeComment((v, "skip left operand")); + VdbeCoverage(v); + r1 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, pTmpReg); + }else{ + /* Compute the left operand first */ + r1 = sqlite3ExprCodeTarget(pParse, pExpr->pLeft, target); + if( ExprHasProperty(pExpr->pRight, EP_Subquery) ){ + /* Skip over the computation of the right operand if the right + ** operand is a subquery and the left operand completely determines + ** the result */ + regSS = r1; + addrSkip = sqlite3VdbeAddOp1(v, skipOp, r1); + VdbeComment((v, "skip right operand")); + VdbeCoverage(v); + }else{ + addrSkip = regSS = 0; + } + r2 = sqlite3ExprCodeTemp(pParse, pExpr->pRight, pTmpReg); + } + sqlite3VdbeAddOp3(v, op, r2, r1, target); + testcase( (*pTmpReg)==0 ); + if( addrSkip ){ + sqlite3VdbeAddOp2(v, OP_Goto, 0, sqlite3VdbeCurrentAddr(v)+2); + sqlite3VdbeJumpHere(v, addrSkip); + 
sqlite3VdbeAddOp3(v, OP_Or, regSS, regSS, target); + VdbeComment((v, "short-circut value")); + } + return target; +} + + /* ** Generate code into the current Vdbe to evaluate the given @@ -115308,11 +116565,17 @@ expr_code_doover: case TK_NE: case TK_EQ: { Expr *pLeft = pExpr->pLeft; + int addrIsNull = 0; if( sqlite3ExprIsVector(pLeft) ){ codeVectorCompare(pParse, pExpr, target, op, p5); }else{ - r1 = sqlite3ExprCodeTemp(pParse, pLeft, ®Free1); - r2 = sqlite3ExprCodeTemp(pParse, pExpr->pRight, ®Free2); + if( ExprHasProperty(pExpr, EP_Subquery) && p5!=SQLITE_NULLEQ ){ + addrIsNull = exprComputeOperands(pParse, pExpr, + &r1, &r2, ®Free1, ®Free2); + }else{ + r1 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, ®Free1); + r2 = sqlite3ExprCodeTemp(pParse, pExpr->pRight, ®Free2); + } sqlite3VdbeAddOp2(v, OP_Integer, 1, inReg); codeCompare(pParse, pLeft, pExpr->pRight, op, r1, r2, sqlite3VdbeCurrentAddr(v)+2, p5, @@ -115327,6 +116590,11 @@ expr_code_doover: sqlite3VdbeAddOp2(v, OP_Integer, 0, inReg); }else{ sqlite3VdbeAddOp3(v, OP_ZeroOrNull, r1, inReg, r2); + if( addrIsNull ){ + sqlite3VdbeAddOp2(v, OP_Goto, 0, sqlite3VdbeCurrentAddr(v)+2); + sqlite3VdbeJumpHere(v, addrIsNull); + sqlite3VdbeAddOp2(v, OP_Null, 0, inReg); + } } testcase( regFree1==0 ); testcase( regFree2==0 ); @@ -115334,7 +116602,10 @@ expr_code_doover: break; } case TK_AND: - case TK_OR: + case TK_OR: { + inReg = exprCodeTargetAndOr(pParse, pExpr, target, ®Free1); + break; + } case TK_PLUS: case TK_STAR: case TK_MINUS: @@ -115345,8 +116616,7 @@ expr_code_doover: case TK_LSHIFT: case TK_RSHIFT: case TK_CONCAT: { - assert( TK_AND==OP_And ); testcase( op==TK_AND ); - assert( TK_OR==OP_Or ); testcase( op==TK_OR ); + int addrIsNull; assert( TK_PLUS==OP_Add ); testcase( op==TK_PLUS ); assert( TK_MINUS==OP_Subtract ); testcase( op==TK_MINUS ); assert( TK_REM==OP_Remainder ); testcase( op==TK_REM ); @@ -115356,11 +116626,23 @@ expr_code_doover: assert( TK_LSHIFT==OP_ShiftLeft ); testcase( op==TK_LSHIFT ); assert( 
TK_RSHIFT==OP_ShiftRight ); testcase( op==TK_RSHIFT ); assert( TK_CONCAT==OP_Concat ); testcase( op==TK_CONCAT ); - r1 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, ®Free1); - r2 = sqlite3ExprCodeTemp(pParse, pExpr->pRight, ®Free2); + if( ExprHasProperty(pExpr, EP_Subquery) ){ + addrIsNull = exprComputeOperands(pParse, pExpr, + &r1, &r2, ®Free1, ®Free2); + }else{ + r1 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, ®Free1); + r2 = sqlite3ExprCodeTemp(pParse, pExpr->pRight, ®Free2); + addrIsNull = 0; + } sqlite3VdbeAddOp3(v, op, r2, r1, target); testcase( regFree1==0 ); testcase( regFree2==0 ); + if( addrIsNull ){ + sqlite3VdbeAddOp2(v, OP_Goto, 0, sqlite3VdbeCurrentAddr(v)+2); + sqlite3VdbeJumpHere(v, addrIsNull); + sqlite3VdbeAddOp2(v, OP_Null, 0, target); + VdbeComment((v, "short-circut value")); + } break; } case TK_UMINUS: { @@ -116228,17 +117510,27 @@ SQLITE_PRIVATE void sqlite3ExprIfTrue(Parse *pParse, Expr *pExpr, int dest, int Expr *pAlt = sqlite3ExprSimplifiedAndOr(pExpr); if( pAlt!=pExpr ){ sqlite3ExprIfTrue(pParse, pAlt, dest, jumpIfNull); - }else if( op==TK_AND ){ - int d2 = sqlite3VdbeMakeLabel(pParse); - testcase( jumpIfNull==0 ); - sqlite3ExprIfFalse(pParse, pExpr->pLeft, d2, - jumpIfNull^SQLITE_JUMPIFNULL); - sqlite3ExprIfTrue(pParse, pExpr->pRight, dest, jumpIfNull); - sqlite3VdbeResolveLabel(v, d2); }else{ - testcase( jumpIfNull==0 ); - sqlite3ExprIfTrue(pParse, pExpr->pLeft, dest, jumpIfNull); - sqlite3ExprIfTrue(pParse, pExpr->pRight, dest, jumpIfNull); + Expr *pFirst, *pSecond; + if( exprEvalRhsFirst(pExpr) ){ + pFirst = pExpr->pRight; + pSecond = pExpr->pLeft; + }else{ + pFirst = pExpr->pLeft; + pSecond = pExpr->pRight; + } + if( op==TK_AND ){ + int d2 = sqlite3VdbeMakeLabel(pParse); + testcase( jumpIfNull==0 ); + sqlite3ExprIfFalse(pParse, pFirst, d2, + jumpIfNull^SQLITE_JUMPIFNULL); + sqlite3ExprIfTrue(pParse, pSecond, dest, jumpIfNull); + sqlite3VdbeResolveLabel(v, d2); + }else{ + testcase( jumpIfNull==0 ); + sqlite3ExprIfTrue(pParse, pFirst, 
dest, jumpIfNull); + sqlite3ExprIfTrue(pParse, pSecond, dest, jumpIfNull); + } } break; } @@ -116277,10 +117569,16 @@ SQLITE_PRIVATE void sqlite3ExprIfTrue(Parse *pParse, Expr *pExpr, int dest, int case TK_GE: case TK_NE: case TK_EQ: { + int addrIsNull; if( sqlite3ExprIsVector(pExpr->pLeft) ) goto default_expr; - testcase( jumpIfNull==0 ); - r1 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, ®Free1); - r2 = sqlite3ExprCodeTemp(pParse, pExpr->pRight, ®Free2); + if( ExprHasProperty(pExpr, EP_Subquery) && jumpIfNull!=SQLITE_NULLEQ ){ + addrIsNull = exprComputeOperands(pParse, pExpr, + &r1, &r2, ®Free1, ®Free2); + }else{ + r1 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, ®Free1); + r2 = sqlite3ExprCodeTemp(pParse, pExpr->pRight, ®Free2); + addrIsNull = 0; + } codeCompare(pParse, pExpr->pLeft, pExpr->pRight, op, r1, r2, dest, jumpIfNull, ExprHasProperty(pExpr,EP_Commuted)); assert(TK_LT==OP_Lt); testcase(op==OP_Lt); VdbeCoverageIf(v,op==OP_Lt); @@ -116295,6 +117593,13 @@ SQLITE_PRIVATE void sqlite3ExprIfTrue(Parse *pParse, Expr *pExpr, int dest, int VdbeCoverageIf(v, op==OP_Ne && jumpIfNull!=SQLITE_NULLEQ); testcase( regFree1==0 ); testcase( regFree2==0 ); + if( addrIsNull ){ + if( jumpIfNull ){ + sqlite3VdbeChangeP2(v, addrIsNull, dest); + }else{ + sqlite3VdbeJumpHere(v, addrIsNull); + } + } break; } case TK_ISNULL: @@ -116402,17 +117707,27 @@ SQLITE_PRIVATE void sqlite3ExprIfFalse(Parse *pParse, Expr *pExpr, int dest, int Expr *pAlt = sqlite3ExprSimplifiedAndOr(pExpr); if( pAlt!=pExpr ){ sqlite3ExprIfFalse(pParse, pAlt, dest, jumpIfNull); - }else if( pExpr->op==TK_AND ){ - testcase( jumpIfNull==0 ); - sqlite3ExprIfFalse(pParse, pExpr->pLeft, dest, jumpIfNull); - sqlite3ExprIfFalse(pParse, pExpr->pRight, dest, jumpIfNull); }else{ - int d2 = sqlite3VdbeMakeLabel(pParse); - testcase( jumpIfNull==0 ); - sqlite3ExprIfTrue(pParse, pExpr->pLeft, d2, - jumpIfNull^SQLITE_JUMPIFNULL); - sqlite3ExprIfFalse(pParse, pExpr->pRight, dest, jumpIfNull); - sqlite3VdbeResolveLabel(v, d2); + 
Expr *pFirst, *pSecond; + if( exprEvalRhsFirst(pExpr) ){ + pFirst = pExpr->pRight; + pSecond = pExpr->pLeft; + }else{ + pFirst = pExpr->pLeft; + pSecond = pExpr->pRight; + } + if( pExpr->op==TK_AND ){ + testcase( jumpIfNull==0 ); + sqlite3ExprIfFalse(pParse, pFirst, dest, jumpIfNull); + sqlite3ExprIfFalse(pParse, pSecond, dest, jumpIfNull); + }else{ + int d2 = sqlite3VdbeMakeLabel(pParse); + testcase( jumpIfNull==0 ); + sqlite3ExprIfTrue(pParse, pFirst, d2, + jumpIfNull^SQLITE_JUMPIFNULL); + sqlite3ExprIfFalse(pParse, pSecond, dest, jumpIfNull); + sqlite3VdbeResolveLabel(v, d2); + } } break; } @@ -116454,10 +117769,16 @@ SQLITE_PRIVATE void sqlite3ExprIfFalse(Parse *pParse, Expr *pExpr, int dest, int case TK_GE: case TK_NE: case TK_EQ: { + int addrIsNull; if( sqlite3ExprIsVector(pExpr->pLeft) ) goto default_expr; - testcase( jumpIfNull==0 ); - r1 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, ®Free1); - r2 = sqlite3ExprCodeTemp(pParse, pExpr->pRight, ®Free2); + if( ExprHasProperty(pExpr, EP_Subquery) && jumpIfNull!=SQLITE_NULLEQ ){ + addrIsNull = exprComputeOperands(pParse, pExpr, + &r1, &r2, ®Free1, ®Free2); + }else{ + r1 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, ®Free1); + r2 = sqlite3ExprCodeTemp(pParse, pExpr->pRight, ®Free2); + addrIsNull = 0; + } codeCompare(pParse, pExpr->pLeft, pExpr->pRight, op, r1, r2, dest, jumpIfNull,ExprHasProperty(pExpr,EP_Commuted)); assert(TK_LT==OP_Lt); testcase(op==OP_Lt); VdbeCoverageIf(v,op==OP_Lt); @@ -116472,6 +117793,13 @@ SQLITE_PRIVATE void sqlite3ExprIfFalse(Parse *pParse, Expr *pExpr, int dest, int VdbeCoverageIf(v, op==OP_Ne && jumpIfNull==SQLITE_NULLEQ); testcase( regFree1==0 ); testcase( regFree2==0 ); + if( addrIsNull ){ + if( jumpIfNull ){ + sqlite3VdbeChangeP2(v, addrIsNull, dest); + }else{ + sqlite3VdbeJumpHere(v, addrIsNull); + } + } break; } case TK_ISNULL: @@ -123437,6 +124765,16 @@ SQLITE_PRIVATE Table *sqlite3LocateTable( if( pMod==0 && sqlite3_strnicmp(zName, "pragma_", 7)==0 ){ pMod = 
sqlite3PragmaVtabRegister(db, zName); } +#ifndef SQLITE_OMIT_JSON + if( pMod==0 && sqlite3_strnicmp(zName, "json", 4)==0 ){ + pMod = sqlite3JsonVtabRegister(db, zName); + } +#endif +#ifdef SQLITE_ENABLE_CARRAY + if( pMod==0 && sqlite3_stricmp(zName, "carray")==0 ){ + pMod = sqlite3CarrayRegister(db); + } +#endif if( pMod && sqlite3VtabEponymousTableInit(pParse, pMod) ){ testcase( pMod->pEpoTab==0 ); return pMod->pEpoTab; @@ -124075,7 +125413,7 @@ SQLITE_PRIVATE int sqlite3TableColumnToIndex(Index *pIdx, int iCol){ int i; i16 iCol16; assert( iCol>=(-1) && iCol<=SQLITE_MAX_COLUMN ); - assert( pIdx->nColumn<=SQLITE_MAX_COLUMN+1 ); + assert( pIdx->nColumn<=SQLITE_MAX_COLUMN*2 ); iCol16 = iCol; for(i=0; inColumn; i++){ if( iCol16==pIdx->aiColumn[i] ){ @@ -124372,6 +125710,9 @@ SQLITE_PRIVATE void sqlite3StartTable( sqlite3VdbeAddOp3(v, OP_Insert, 0, reg3, reg1); sqlite3VdbeChangeP5(v, OPFLAG_APPEND); sqlite3VdbeAddOp0(v, OP_Close); + }else if( db->init.imposterTable ){ + pTable->tabFlags |= TF_Imposter; + if( db->init.imposterTable>=2 ) pTable->tabFlags |= TF_Readonly; } /* Normal (non-error) return. */ @@ -128141,16 +129482,22 @@ SQLITE_PRIVATE void sqlite3SrcListIndexedBy(Parse *pParse, SrcList *p, Token *pI ** are deleted by this function. 
*/ SQLITE_PRIVATE SrcList *sqlite3SrcListAppendList(Parse *pParse, SrcList *p1, SrcList *p2){ - assert( p1 && p1->nSrc==1 ); + assert( p1 ); + assert( p2 || pParse->nErr ); + assert( p2==0 || p2->nSrc>=1 ); + testcase( p1->nSrc==0 ); if( p2 ){ - SrcList *pNew = sqlite3SrcListEnlarge(pParse, p1, p2->nSrc, 1); + int nOld = p1->nSrc; + SrcList *pNew = sqlite3SrcListEnlarge(pParse, p1, p2->nSrc, nOld); if( pNew==0 ){ sqlite3SrcListDelete(pParse->db, p2); }else{ p1 = pNew; - memcpy(&p1->a[1], p2->a, p2->nSrc*sizeof(SrcItem)); + memcpy(&p1->a[nOld], p2->a, p2->nSrc*sizeof(SrcItem)); + assert( nOld==1 || (p2->a[0].fg.jointype & JT_LTORJ)==0 ); + assert( p1->nSrc>=1 ); + p1->a[0].fg.jointype |= (JT_LTORJ & p2->a[0].fg.jointype); sqlite3DbFree(pParse->db, p2); - p1->a[0].fg.jointype |= (JT_LTORJ & p1->a[1].fg.jointype); } } return p1; @@ -128661,14 +130008,19 @@ SQLITE_PRIVATE KeyInfo *sqlite3KeyInfoOfIndex(Parse *pParse, Index *pIdx){ } if( pParse->nErr ){ assert( pParse->rc==SQLITE_ERROR_MISSING_COLLSEQ ); - if( pIdx->bNoQuery==0 ){ + if( pIdx->bNoQuery==0 + && sqlite3HashFind(&pIdx->pSchema->idxHash, pIdx->zName) + ){ /* Deactivate the index because it contains an unknown collating ** sequence. The only way to reactive the index is to reload the ** schema. Adding the missing collating sequence later does not ** reactive the index. The application had the chance to register ** the missing index using the collation-needed callback. For ** simplicity, SQLite will not give the application a second chance. - */ + ** + ** Except, do not do this if the index is not in the schema hash + ** table. In this case the index is currently being constructed + ** by a CREATE INDEX statement, and retrying will not help. 
*/ pIdx->bNoQuery = 1; pParse->rc = SQLITE_ERROR_RETRY; } @@ -129305,6 +130657,7 @@ SQLITE_PRIVATE void sqlite3SchemaClear(void *p){ for(pElem=sqliteHashFirst(&temp2); pElem; pElem=sqliteHashNext(pElem)){ sqlite3DeleteTrigger(&xdb, (Trigger*)sqliteHashData(pElem)); } + sqlite3HashClear(&temp2); sqlite3HashInit(&pSchema->tblHash); for(pElem=sqliteHashFirst(&temp1); pElem; pElem=sqliteHashNext(pElem)){ @@ -130865,7 +132218,7 @@ static void *contextMalloc(sqlite3_context *context, i64 nByte){ sqlite3 *db = sqlite3_context_db_handle(context); assert( nByte>0 ); testcase( nByte==db->aLimit[SQLITE_LIMIT_LENGTH] ); - testcase( nByte==db->aLimit[SQLITE_LIMIT_LENGTH]+1 ); + testcase( nByte==(i64)db->aLimit[SQLITE_LIMIT_LENGTH]+1 ); if( nByte>db->aLimit[SQLITE_LIMIT_LENGTH] ){ sqlite3_result_error_toobig(context); z = 0; @@ -131536,7 +132889,7 @@ SQLITE_PRIVATE void sqlite3QuoteValue(StrAccum *pStr, sqlite3_value *pValue, int */ static int isNHex(const char *z, int N, u32 *pVal){ int i; - int v = 0; + u32 v = 0; for(i=0; i0 && nSep>0 ){ + if( bNotNull && nSep>0 ){ memcpy(&z[j], zSep, nSep); j += nSep; } memcpy(&z[j], v, k); j += k; + bNotNull = 1; } } } @@ -133016,6 +134371,502 @@ static void signFunc( sqlite3_result_int(context, x<0.0 ? -1 : x>0.0 ? +1 : 0); } +#if defined(SQLITE_ENABLE_PERCENTILE) +/*********************************************************************** +** This section implements the percentile(Y,P) SQL function and similar. +** Requirements: +** +** (1) The percentile(Y,P) function is an aggregate function taking +** exactly two arguments. +** +** (2) If the P argument to percentile(Y,P) is not the same for every +** row in the aggregate then an error is thrown. The word "same" +** in the previous sentence means that the value differ by less +** than 0.001. +** +** (3) If the P argument to percentile(Y,P) evaluates to anything other +** than a number in the range of 0.0 to 100.0 inclusive then an +** error is thrown. 
+** +** (4) If any Y argument to percentile(Y,P) evaluates to a value that +** is not NULL and is not numeric then an error is thrown. +** +** (5) If any Y argument to percentile(Y,P) evaluates to plus or minus +** infinity then an error is thrown. (SQLite always interprets NaN +** values as NULL.) +** +** (6) Both Y and P in percentile(Y,P) can be arbitrary expressions, +** including CASE WHEN expressions. +** +** (7) The percentile(Y,P) aggregate is able to handle inputs of at least +** one million (1,000,000) rows. +** +** (8) If there are no non-NULL values for Y, then percentile(Y,P) +** returns NULL. +** +** (9) If there is exactly one non-NULL value for Y, the percentile(Y,P) +** returns the one Y value. +** +** (10) If there N non-NULL values of Y where N is two or more and +** the Y values are ordered from least to greatest and a graph is +** drawn from 0 to N-1 such that the height of the graph at J is +** the J-th Y value and such that straight lines are drawn between +** adjacent Y values, then the percentile(Y,P) function returns +** the height of the graph at P*(N-1)/100. +** +** (11) The percentile(Y,P) function always returns either a floating +** point number or NULL. +** +** (12) The percentile(Y,P) is implemented as a single C99 source-code +** file that compiles into a shared-library or DLL that can be loaded +** into SQLite using the sqlite3_load_extension() interface. +** +** (13) A separate median(Y) function is the equivalent percentile(Y,50). +** +** (14) A separate percentile_cont(Y,P) function is equivalent to +** percentile(Y,P/100.0). In other words, the fraction value in +** the second argument is in the range of 0 to 1 instead of 0 to 100. +** +** (15) A separate percentile_disc(Y,P) function is like +** percentile_cont(Y,P) except that instead of returning the weighted +** average of the nearest two input values, it returns the next lower +** value. 
So the percentile_disc(Y,P) will always return a value +** that was one of the inputs. +** +** (16) All of median(), percentile(Y,P), percentile_cont(Y,P) and +** percentile_disc(Y,P) can be used as window functions. +** +** Differences from standard SQL: +** +** * The percentile_cont(X,P) function is equivalent to the following in +** standard SQL: +** +** (percentile_cont(P) WITHIN GROUP (ORDER BY X)) +** +** The SQLite syntax is much more compact. The standard SQL syntax +** is also supported if SQLite is compiled with the +** -DSQLITE_ENABLE_ORDERED_SET_AGGREGATES option. +** +** * No median(X) function exists in the SQL standard. App developers +** are expected to write "percentile_cont(0.5)WITHIN GROUP(ORDER BY X)". +** +** * No percentile(Y,P) function exists in the SQL standard. Instead of +** percential(Y,P), developers must write this: +** "percentile_cont(P/100.0) WITHIN GROUP (ORDER BY Y)". Note that +** the fraction parameter to percentile() goes from 0 to 100 whereas +** the fraction parameter in SQL standard percentile_cont() goes from +** 0 to 1. +** +** Implementation notes as of 2024-08-31: +** +** * The regular aggregate-function versions of these routines work +** by accumulating all values in an array of doubles, then sorting +** that array using quicksort before computing the answer. Thus +** the runtime is O(NlogN) where N is the number of rows of input. +** +** * For the window-function versions of these routines, the array of +** inputs is sorted as soon as the first value is computed. Thereafter, +** the array is kept in sorted order using an insert-sort. This +** results in O(N*K) performance where K is the size of the window. +** One can imagine alternative implementations that give O(N*logN*logK) +** performance, but they require more complex logic and data structures. 
+** The developers have elected to keep the asymptotically slower +** algorithm for now, for simplicity, under the theory that window +** functions are seldom used and when they are, the window size K is +** often small. The developers might revisit that decision later, +** should the need arise. +*/ + +/* The following object is the group context for a single percentile() +** aggregate. Remember all input Y values until the very end. +** Those values are accumulated in the Percentile.a[] array. +*/ +typedef struct Percentile Percentile; +struct Percentile { + u64 nAlloc; /* Number of slots allocated for a[] */ + u64 nUsed; /* Number of slots actually used in a[] */ + char bSorted; /* True if a[] is already in sorted order */ + char bKeepSorted; /* True if advantageous to keep a[] sorted */ + char bPctValid; /* True if rPct is valid */ + double rPct; /* Fraction. 0.0 to 1.0 */ + double *a; /* Array of Y values */ +}; + +/* +** Return TRUE if the input floating-point number is an infinity. +*/ +static int percentIsInfinity(double r){ + sqlite3_uint64 u; + assert( sizeof(u)==sizeof(r) ); + memcpy(&u, &r, sizeof(u)); + return ((u>>52)&0x7ff)==0x7ff; +} + +/* +** Return TRUE if two doubles differ by 0.001 or less. +*/ +static int percentSameValue(double a, double b){ + a -= b; + return a>=-0.001 && a<=0.001; +} + +/* +** Search p (which must have p->bSorted) looking for an entry with +** value y. Return the index of that entry. +** +** If bExact is true, return -1 if the entry is not found. +** +** If bExact is false, return the index at which a new entry with +** value y should be insert in order to keep the values in sorted +** order. The smallest return value in this case will be 0, and +** the largest return value will be p->nUsed. 
+*/ +static i64 percentBinarySearch(Percentile *p, double y, int bExact){ + i64 iFirst = 0; /* First element of search range */ + i64 iLast = (i64)p->nUsed - 1; /* Last element of search range */ + while( iLast>=iFirst ){ + i64 iMid = (iFirst+iLast)/2; + double x = p->a[iMid]; + if( xy ){ + iLast = iMid - 1; + }else{ + return iMid; + } + } + if( bExact ) return -1; + return iFirst; +} + +/* +** Generate an error for a percentile function. +** +** The error format string must have exactly one occurrence of "%%s()" +** (with two '%' characters). That substring will be replaced by the name +** of the function. +*/ +static void percentError(sqlite3_context *pCtx, const char *zFormat, ...){ + char *zMsg1; + char *zMsg2; + va_list ap; + + va_start(ap, zFormat); + zMsg1 = sqlite3_vmprintf(zFormat, ap); + va_end(ap); + zMsg2 = zMsg1 ? sqlite3_mprintf(zMsg1, sqlite3VdbeFuncName(pCtx)) : 0; + sqlite3_result_error(pCtx, zMsg2, -1); + sqlite3_free(zMsg1); + sqlite3_free(zMsg2); +} + +/* +** The "step" function for percentile(Y,P) is called once for each +** input row. +*/ +static void percentStep(sqlite3_context *pCtx, int argc, sqlite3_value **argv){ + Percentile *p; + double rPct; + int eType; + double y; + assert( argc==2 || argc==1 ); + + if( argc==1 ){ + /* Requirement 13: median(Y) is the same as percentile(Y,50). */ + rPct = 0.5; + }else{ + /* P must be a number between 0 and 100 for percentile() or between + ** 0.0 and 1.0 for percentile_cont() and percentile_disc(). + ** + ** The user-data is an integer which is 10 times the upper bound. + */ + double mxFrac = (SQLITE_PTR_TO_INT(sqlite3_user_data(pCtx))&2)? 100.0 : 1.0; + eType = sqlite3_value_numeric_type(argv[1]); + rPct = sqlite3_value_double(argv[1])/mxFrac; + if( (eType!=SQLITE_INTEGER && eType!=SQLITE_FLOAT) + || rPct<0.0 || rPct>1.0 + ){ + percentError(pCtx, "the fraction argument to %%s()" + " is not between 0.0 and %.1f", + (double)mxFrac); + return; + } + } + + /* Allocate the session context. 
*/ + p = (Percentile*)sqlite3_aggregate_context(pCtx, sizeof(*p)); + if( p==0 ) return; + + /* Remember the P value. Throw an error if the P value is different + ** from any prior row, per Requirement (2). */ + if( !p->bPctValid ){ + p->rPct = rPct; + p->bPctValid = 1; + }else if( !percentSameValue(p->rPct,rPct) ){ + percentError(pCtx, "the fraction argument to %%s()" + " is not the same for all input rows"); + return; + } + + /* Ignore rows for which Y is NULL */ + eType = sqlite3_value_type(argv[0]); + if( eType==SQLITE_NULL ) return; + + /* If not NULL, then Y must be numeric. Otherwise throw an error. + ** Requirement 4 */ + if( eType!=SQLITE_INTEGER && eType!=SQLITE_FLOAT ){ + percentError(pCtx, "input to %%s() is not numeric"); + return; + } + + /* Throw an error if the Y value is infinity or NaN */ + y = sqlite3_value_double(argv[0]); + if( percentIsInfinity(y) ){ + percentError(pCtx, "Inf input to %%s()"); + return; + } + + /* Allocate and store the Y */ + if( p->nUsed>=p->nAlloc ){ + u64 n = p->nAlloc*2 + 250; + double *a = sqlite3_realloc64(p->a, sizeof(double)*n); + if( a==0 ){ + sqlite3_free(p->a); + memset(p, 0, sizeof(*p)); + sqlite3_result_error_nomem(pCtx); + return; + } + p->nAlloc = n; + p->a = a; + } + if( p->nUsed==0 ){ + p->a[p->nUsed++] = y; + p->bSorted = 1; + }else if( !p->bSorted || y>=p->a[p->nUsed-1] ){ + p->a[p->nUsed++] = y; + }else if( p->bKeepSorted ){ + i64 i; + i = percentBinarySearch(p, y, 0); + if( i<(int)p->nUsed ){ + memmove(&p->a[i+1], &p->a[i], (p->nUsed-i)*sizeof(p->a[0])); + } + p->a[i] = y; + p->nUsed++; + }else{ + p->a[p->nUsed++] = y; + p->bSorted = 0; + } +} + +/* +** Interchange two doubles. +*/ +#define SWAP_DOUBLE(X,Y) {double ttt=(X);(X)=(Y);(Y)=ttt;} + +/* +** Sort an array of doubles. 
+** +** Algorithm: quicksort +** +** This is implemented separately rather than using the qsort() routine +** from the standard library because: +** +** (1) To avoid a dependency on qsort() +** (2) To avoid the function call to the comparison routine for each +** comparison. +*/ +static void percentSort(double *a, unsigned int n){ + int iLt; /* Entries before a[iLt] are less than rPivot */ + int iGt; /* Entries at or after a[iGt] are greater than rPivot */ + int i; /* Loop counter */ + double rPivot; /* The pivot value */ + + assert( n>=2 ); + if( a[0]>a[n-1] ){ + SWAP_DOUBLE(a[0],a[n-1]) + } + if( n==2 ) return; + iGt = n-1; + i = n/2; + if( a[0]>a[i] ){ + SWAP_DOUBLE(a[0],a[i]) + }else if( a[i]>a[iGt] ){ + SWAP_DOUBLE(a[i],a[iGt]) + } + if( n==3 ) return; + rPivot = a[i]; + iLt = i = 1; + do{ + if( a[i]iLt ) SWAP_DOUBLE(a[i],a[iLt]) + iLt++; + i++; + }else if( a[i]>rPivot ){ + do{ + iGt--; + }while( iGt>i && a[iGt]>rPivot ); + SWAP_DOUBLE(a[i],a[iGt]) + }else{ + i++; + } + }while( i=2 ) percentSort(a, iLt); + if( n-iGt>=2 ) percentSort(a+iGt, n-iGt); + +/* Uncomment for testing */ +#if 0 + for(i=0; ibSorted==0 ){ + assert( p->nUsed>1 ); + percentSort(p->a, p->nUsed); + p->bSorted = 1; + } + p->bKeepSorted = 1; + + /* Find and remove the row */ + i = percentBinarySearch(p, y, 1); + if( i>=0 ){ + p->nUsed--; + if( i<(int)p->nUsed ){ + memmove(&p->a[i], &p->a[i+1], (p->nUsed - i)*sizeof(p->a[0])); + } + } +} + +/* +** Compute the final output of percentile(). Clean up all allocated +** memory if and only if bIsFinal is true. +*/ +static void percentCompute(sqlite3_context *pCtx, int bIsFinal){ + Percentile *p; + int settings = SQLITE_PTR_TO_INT(sqlite3_user_data(pCtx))&1; /* Discrete? 
*/ + unsigned i1, i2; + double v1, v2; + double ix, vx; + p = (Percentile*)sqlite3_aggregate_context(pCtx, 0); + if( p==0 ) return; + if( p->a==0 ) return; + if( p->nUsed ){ + if( p->bSorted==0 ){ + assert( p->nUsed>1 ); + percentSort(p->a, p->nUsed); + p->bSorted = 1; + } + ix = p->rPct*(p->nUsed-1); + i1 = (unsigned)ix; + if( settings & 1 ){ + vx = p->a[i1]; + }else{ + i2 = ix==(double)i1 || i1==p->nUsed-1 ? i1 : i1+1; + v1 = p->a[i1]; + v2 = p->a[i2]; + vx = v1 + (v2-v1)*(ix-i1); + } + sqlite3_result_double(pCtx, vx); + } + if( bIsFinal ){ + sqlite3_free(p->a); + memset(p, 0, sizeof(*p)); + }else{ + p->bKeepSorted = 1; + } +} +static void percentFinal(sqlite3_context *pCtx){ + percentCompute(pCtx, 1); +} +static void percentValue(sqlite3_context *pCtx){ + percentCompute(pCtx, 0); +} +/****** End of percentile family of functions ******/ +#endif /* SQLITE_ENABLE_PERCENTILE */ + +#if defined(SQLITE_DEBUG) || defined(SQLITE_ENABLE_FILESTAT) +/* +** Implementation of sqlite_filestat(SCHEMA). +** +** Return JSON text that describes low-level debug/diagnostic information +** about the sqlite3_file object associated with SCHEMA. 
+*/ +static void filestatFunc( + sqlite3_context *context, + int argc, + sqlite3_value **argv +){ + sqlite3 *db = sqlite3_context_db_handle(context); + const char *zDbName; + sqlite3_str *pStr; + Btree *pBtree; + + zDbName = (const char*)sqlite3_value_text(argv[0]); + pBtree = sqlite3DbNameToBtree(db, zDbName); + if( pBtree ){ + Pager *pPager; + sqlite3_file *fd; + int rc; + sqlite3BtreeEnter(pBtree); + pPager = sqlite3BtreePager(pBtree); + assert( pPager!=0 ); + fd = sqlite3PagerFile(pPager); + pStr = sqlite3_str_new(db); + if( pStr==0 ){ + sqlite3_result_error_nomem(context); + }else{ + sqlite3_str_append(pStr, "{\"db\":", 6); + rc = sqlite3OsFileControl(fd, SQLITE_FCNTL_FILESTAT, pStr); + if( rc ) sqlite3_str_append(pStr, "null", 4); + fd = sqlite3PagerJrnlFile(pPager); + if( fd && fd->pMethods!=0 ){ + sqlite3_str_appendall(pStr, ",\"journal\":"); + rc = sqlite3OsFileControl(fd, SQLITE_FCNTL_FILESTAT, pStr); + if( rc ) sqlite3_str_append(pStr, "null", 4); + } + sqlite3_str_append(pStr, "}", 1); + sqlite3_result_text(context, sqlite3_str_finish(pStr), -1, + sqlite3_free); + } + sqlite3BtreeLeave(pBtree); + }else{ + sqlite3_result_text(context, "{}", 2, SQLITE_STATIC); + } +} +#endif /* SQLITE_DEBUG || SQLITE_ENABLE_FILESTAT */ + #ifdef SQLITE_DEBUG /* ** Implementation of fpdecode(x,y,z) function. 
@@ -133173,6 +135024,9 @@ SQLITE_PRIVATE void sqlite3RegisterBuiltinFunctions(void){ INLINE_FUNC(likely, 1, INLINEFUNC_unlikely, SQLITE_FUNC_UNLIKELY), #ifdef SQLITE_ENABLE_OFFSET_SQL_FUNC INLINE_FUNC(sqlite_offset, 1, INLINEFUNC_sqlite_offset, 0 ), +#endif +#if defined(SQLITE_DEBUG) || defined(SQLITE_ENABLE_FILESTAT) + FUNCTION(sqlite_filestat, 1, 0, 0, filestatFunc ), #endif FUNCTION(ltrim, 1, 1, 0, trimFunc ), FUNCTION(ltrim, 2, 1, 0, trimFunc ), @@ -133246,6 +135100,21 @@ SQLITE_PRIVATE void sqlite3RegisterBuiltinFunctions(void){ WAGGREGATE(string_agg, 2, 0, 0, groupConcatStep, groupConcatFinalize, groupConcatValue, groupConcatInverse, 0), +#ifdef SQLITE_ENABLE_PERCENTILE + WAGGREGATE(median, 1, 0,0, percentStep, + percentFinal, percentValue, percentInverse, + SQLITE_INNOCUOUS|SQLITE_SELFORDER1), + WAGGREGATE(percentile, 2, 0x2,0, percentStep, + percentFinal, percentValue, percentInverse, + SQLITE_INNOCUOUS|SQLITE_SELFORDER1), + WAGGREGATE(percentile_cont, 2, 0,0, percentStep, + percentFinal, percentValue, percentInverse, + SQLITE_INNOCUOUS|SQLITE_SELFORDER1), + WAGGREGATE(percentile_disc, 2, 0x1,0, percentStep, + percentFinal, percentValue, percentInverse, + SQLITE_INNOCUOUS|SQLITE_SELFORDER1), +#endif /* SQLITE_ENABLE_PERCENTILE */ + LIKEFUNC(glob, 2, &globInfo, SQLITE_FUNC_LIKE|SQLITE_FUNC_CASE), #ifdef SQLITE_CASE_SENSITIVE_LIKE LIKEFUNC(like, 2, &likeInfoAlt, SQLITE_FUNC_LIKE|SQLITE_FUNC_CASE), @@ -135000,12 +136869,15 @@ SQLITE_PRIVATE void sqlite3TableAffinity(Vdbe *v, Table *pTab, int iReg){ ** by one slot and insert a new OP_TypeCheck where the current ** OP_MakeRecord is found */ VdbeOp *pPrev; + int p3; sqlite3VdbeAppendP4(v, pTab, P4_TABLE); pPrev = sqlite3VdbeGetLastOp(v); assert( pPrev!=0 ); assert( pPrev->opcode==OP_MakeRecord || sqlite3VdbeDb(v)->mallocFailed ); pPrev->opcode = OP_TypeCheck; - sqlite3VdbeAddOp3(v, OP_MakeRecord, pPrev->p1, pPrev->p2, pPrev->p3); + p3 = pPrev->p3; + pPrev->p3 = 0; + sqlite3VdbeAddOp3(v, OP_MakeRecord, pPrev->p1, 
pPrev->p2, p3); }else{ /* Insert an isolated OP_Typecheck */ sqlite3VdbeAddOp2(v, OP_TypeCheck, iReg, pTab->nNVCol); @@ -138740,6 +140612,10 @@ struct sqlite3_api_routines { int (*set_clientdata)(sqlite3*, const char*, void*, void(*)(void*)); /* Version 3.50.0 and later */ int (*setlk_timeout)(sqlite3*,int,int); + /* Version 3.51.0 and later */ + int (*set_errmsg)(sqlite3*,int,const char*); + int (*db_status64)(sqlite3*,int,sqlite3_int64*,sqlite3_int64*,int); + }; /* @@ -139075,6 +140951,9 @@ typedef int (*sqlite3_loadext_entry)( #define sqlite3_set_clientdata sqlite3_api->set_clientdata /* Version 3.50.0 and later */ #define sqlite3_setlk_timeout sqlite3_api->setlk_timeout +/* Version 3.51.0 and later */ +#define sqlite3_set_errmsg sqlite3_api->set_errmsg +#define sqlite3_db_status64 sqlite3_api->db_status64 #endif /* !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION) */ #if !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION) @@ -139598,7 +141477,10 @@ static const sqlite3_api_routines sqlite3Apis = { sqlite3_get_clientdata, sqlite3_set_clientdata, /* Version 3.50.0 and later */ - sqlite3_setlk_timeout + sqlite3_setlk_timeout, + /* Version 3.51.0 and later */ + sqlite3_set_errmsg, + sqlite3_db_status64 }; /* True if x is the directory separator character @@ -141060,6 +142942,22 @@ static int integrityCheckResultRow(Vdbe *v){ return addr; } +/* +** Should table pTab be skipped when doing an integrity_check? +** Return true or false. +** +** If pObjTab is not null, the return true if pTab matches pObjTab. +** +** If pObjTab is null, then return true only if pTab is an imposter table. +*/ +static int tableSkipIntegrityCheck(const Table *pTab, const Table *pObjTab){ + if( pObjTab ){ + return pTab!=pObjTab; + }else{ + return (pTab->tabFlags & TF_Imposter)!=0; + } +} + /* ** Process a pragma statement. 
** @@ -142405,7 +144303,7 @@ SQLITE_PRIVATE void sqlite3Pragma( Table *pTab = sqliteHashData(x); /* Current table */ Index *pIdx; /* An index on pTab */ int nIdx; /* Number of indexes on pTab */ - if( pObjTab && pObjTab!=pTab ) continue; + if( tableSkipIntegrityCheck(pTab,pObjTab) ) continue; if( HasRowid(pTab) ) cnt++; for(nIdx=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, nIdx++){ cnt++; } } @@ -142418,7 +144316,7 @@ SQLITE_PRIVATE void sqlite3Pragma( for(x=sqliteHashFirst(pTbls); x; x=sqliteHashNext(x)){ Table *pTab = sqliteHashData(x); Index *pIdx; - if( pObjTab && pObjTab!=pTab ) continue; + if( tableSkipIntegrityCheck(pTab,pObjTab) ) continue; if( HasRowid(pTab) ) aRoot[++cnt] = pTab->tnum; for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){ aRoot[++cnt] = pIdx->tnum; @@ -142449,7 +144347,7 @@ SQLITE_PRIVATE void sqlite3Pragma( int iTab = 0; Table *pTab = sqliteHashData(x); Index *pIdx; - if( pObjTab && pObjTab!=pTab ) continue; + if( tableSkipIntegrityCheck(pTab,pObjTab) ) continue; if( HasRowid(pTab) ){ iTab = cnt++; }else{ @@ -142485,7 +144383,7 @@ SQLITE_PRIVATE void sqlite3Pragma( int r2; /* Previous key for WITHOUT ROWID tables */ int mxCol; /* Maximum non-virtual column number */ - if( pObjTab && pObjTab!=pTab ) continue; + if( tableSkipIntegrityCheck(pTab,pObjTab) ) continue; if( !IsOrdinaryTable(pTab) ) continue; if( isQuick || HasRowid(pTab) ){ pPk = 0; @@ -142809,7 +144707,7 @@ SQLITE_PRIVATE void sqlite3Pragma( Table *pTab = sqliteHashData(x); sqlite3_vtab *pVTab; int a1; - if( pObjTab && pObjTab!=pTab ) continue; + if( tableSkipIntegrityCheck(pTab,pObjTab) ) continue; if( IsOrdinaryTable(pTab) ) continue; if( !IsVirtual(pTab) ) continue; if( pTab->nCol<=0 ){ @@ -143041,6 +144939,8 @@ SQLITE_PRIVATE void sqlite3Pragma( eMode = SQLITE_CHECKPOINT_RESTART; }else if( sqlite3StrICmp(zRight, "truncate")==0 ){ eMode = SQLITE_CHECKPOINT_TRUNCATE; + }else if( sqlite3StrICmp(zRight, "noop")==0 ){ + eMode = SQLITE_CHECKPOINT_NOOP; } } pParse->nMem = 3; @@ 
-144607,9 +146507,11 @@ static int sqlite3LockAndPrepare( rc = sqlite3Prepare(db, zSql, nBytes, prepFlags, pOld, ppStmt, pzTail); assert( rc==SQLITE_OK || *ppStmt==0 ); if( rc==SQLITE_OK || db->mallocFailed ) break; - }while( (rc==SQLITE_ERROR_RETRY && (cnt++)errMask)==rc ); db->busyHandler.nBusy = 0; @@ -145224,7 +147126,7 @@ static int tableAndColumnIndex( int iEnd, /* Last member of pSrc->a[] to check */ const char *zCol, /* Name of the column we are looking for */ int *piTab, /* Write index of pSrc->a[] here */ - int *piCol, /* Write index of pSrc->a[*piTab].pTab->aCol[] here */ + int *piCol, /* Write index of pSrc->a[*piTab].pSTab->aCol[] here */ int bIgnoreHidden /* Ignore hidden columns */ ){ int i; /* For looping over tables in pSrc */ @@ -145283,8 +147185,7 @@ SQLITE_PRIVATE void sqlite3SetJoinExpr(Expr *p, int iTable, u32 joinFlag){ assert( !ExprHasProperty(p, EP_TokenOnly|EP_Reduced) ); ExprSetVVAProperty(p, EP_NoReduce); p->w.iJoin = iTable; - if( p->op==TK_FUNCTION ){ - assert( ExprUseXList(p) ); + if( ExprUseXList(p) ){ if( p->x.pList ){ int i; for(i=0; ix.pList->nExpr; i++){ @@ -145500,6 +147401,7 @@ static int sqlite3ProcessJoin(Parse *pParse, Select *p){ p->pWhere = sqlite3ExprAnd(pParse, p->pWhere, pRight->u3.pOn); pRight->u3.pOn = 0; pRight->fg.isOn = 1; + p->selFlags |= SF_OnToWhere; } } return 0; @@ -146386,7 +148288,10 @@ static void selectInnerLoop( */ SQLITE_PRIVATE KeyInfo *sqlite3KeyInfoAlloc(sqlite3 *db, int N, int X){ int nExtra = (N+X)*(sizeof(CollSeq*)+1); - KeyInfo *p = sqlite3DbMallocRawNN(db, SZ_KEYINFO(0) + nExtra); + KeyInfo *p; + assert( X>=0 ); + if( NEVER(N+X>0xffff) ) return (KeyInfo*)sqlite3OomFault(db); + p = sqlite3DbMallocRawNN(db, SZ_KEYINFO(0) + nExtra); if( p ){ p->aSortFlags = (u8*)&p->aColl[N+X]; p->nKeyField = (u16)N; @@ -146953,6 +148858,10 @@ static void generateColumnTypes( #endif sqlite3VdbeSetColName(v, i, COLNAME_DECLTYPE, zType, SQLITE_TRANSIENT); } +#else + UNUSED_PARAMETER(pParse); + 
UNUSED_PARAMETER(pTabList); + UNUSED_PARAMETER(pEList); #endif /* !defined(SQLITE_OMIT_DECLTYPE) */ } @@ -147872,8 +149781,10 @@ static int multiSelect( int priorOp; /* The SRT_ operation to apply to prior selects */ Expr *pLimit; /* Saved values of p->nLimit */ int addr; + int emptyBypass = 0; /* IfEmpty opcode to bypass RHS */ SelectDest uniondest; + testcase( p->op==TK_EXCEPT ); testcase( p->op==TK_UNION ); priorOp = SRT_Union; @@ -147911,6 +149822,8 @@ static int multiSelect( */ if( p->op==TK_EXCEPT ){ op = SRT_Except; + emptyBypass = sqlite3VdbeAddOp1(v, OP_IfEmpty, unionTab); + VdbeCoverage(v); }else{ assert( p->op==TK_UNION ); op = SRT_Union; @@ -147931,6 +149844,7 @@ static int multiSelect( if( p->op==TK_UNION ){ p->nSelectRow = sqlite3LogEstAdd(p->nSelectRow, pPrior->nSelectRow); } + if( emptyBypass ) sqlite3VdbeJumpHere(v, emptyBypass); sqlite3ExprDelete(db, p->pLimit); p->pLimit = pLimit; p->iLimit = 0; @@ -147961,9 +149875,10 @@ static int multiSelect( int tab1, tab2; int iCont, iBreak, iStart; Expr *pLimit; - int addr; + int addr, iLimit, iOffset; SelectDest intersectdest; int r1; + int emptyBypass; /* INTERSECT is different from the others since it requires ** two temporary tables. Hence it has its own case. 
Begin @@ -147988,14 +149903,28 @@ static int multiSelect( goto multi_select_end; } + /* Initialize LIMIT counters before checking to see if the LHS + ** is empty, in case the jump is taken */ + iBreak = sqlite3VdbeMakeLabel(pParse); + computeLimitRegisters(pParse, p, iBreak); + emptyBypass = sqlite3VdbeAddOp1(v, OP_IfEmpty, tab1); VdbeCoverage(v); + /* Code the current SELECT into temporary table "tab2" */ addr = sqlite3VdbeAddOp2(v, OP_OpenEphemeral, tab2, 0); assert( p->addrOpenEphm[1] == -1 ); p->addrOpenEphm[1] = addr; - p->pPrior = 0; + + /* Disable prior SELECTs and the LIMIT counters during the computation + ** of the RHS select */ pLimit = p->pLimit; + iLimit = p->iLimit; + iOffset = p->iOffset; + p->pPrior = 0; p->pLimit = 0; + p->iLimit = 0; + p->iOffset = 0; + intersectdest.iSDParm = tab2; ExplainQueryPlan((pParse, 1, "%s USING TEMP B-TREE", sqlite3SelectOpName(p->op))); @@ -148008,19 +149937,21 @@ static int multiSelect( p->nSelectRow = pPrior->nSelectRow; } sqlite3ExprDelete(db, p->pLimit); + + /* Reinstate the LIMIT counters prior to running the final intersect */ p->pLimit = pLimit; + p->iLimit = iLimit; + p->iOffset = iOffset; /* Generate code to take the intersection of the two temporary ** tables. 
*/ if( rc ) break; assert( p->pEList ); - iBreak = sqlite3VdbeMakeLabel(pParse); - iCont = sqlite3VdbeMakeLabel(pParse); - computeLimitRegisters(pParse, p, iBreak); - sqlite3VdbeAddOp2(v, OP_Rewind, tab1, iBreak); VdbeCoverage(v); + sqlite3VdbeAddOp1(v, OP_Rewind, tab1); r1 = sqlite3GetTempReg(pParse); iStart = sqlite3VdbeAddOp2(v, OP_RowData, tab1, r1); + iCont = sqlite3VdbeMakeLabel(pParse); sqlite3VdbeAddOp4Int(v, OP_NotFound, tab2, iCont, r1, 0); VdbeCoverage(v); sqlite3ReleaseTempReg(pParse, r1); @@ -148030,6 +149961,7 @@ static int multiSelect( sqlite3VdbeAddOp2(v, OP_Next, tab1, iStart); VdbeCoverage(v); sqlite3VdbeResolveLabel(v, iBreak); sqlite3VdbeAddOp2(v, OP_Close, tab2, 0); + sqlite3VdbeJumpHere(v, emptyBypass); sqlite3VdbeAddOp2(v, OP_Close, tab1, 0); break; } @@ -148678,7 +150610,7 @@ static int multiSelectOrderBy( ** ## About "isOuterJoin": ** ** The isOuterJoin column indicates that the replacement will occur into a -** position in the parent that NULL-able due to an OUTER JOIN. Either the +** position in the parent that is NULL-able due to an OUTER JOIN. Either the ** target slot in the parent is the right operand of a LEFT JOIN, or one of ** the left operands of a RIGHT JOIN. In either case, we need to potentially ** bypass the substituted expression with OP_IfNullRow. @@ -148708,6 +150640,7 @@ typedef struct SubstContext { int iTable; /* Replace references to this table */ int iNewTable; /* New table number */ int isOuterJoin; /* Add TK_IF_NULL_ROW opcodes on each replacement */ + int nSelDepth; /* Depth of sub-query recursion. 
Top==1 */ ExprList *pEList; /* Replacement expressions */ ExprList *pCList; /* Collation sequences for replacement expr */ } SubstContext; @@ -148815,6 +150748,9 @@ static Expr *substExpr( if( pExpr->op==TK_IF_NULL_ROW && pExpr->iTable==pSubst->iTable ){ pExpr->iTable = pSubst->iNewTable; } + if( pExpr->op==TK_AGG_FUNCTION && pExpr->op2>=pSubst->nSelDepth ){ + pExpr->op2--; + } pExpr->pLeft = substExpr(pSubst, pExpr->pLeft); pExpr->pRight = substExpr(pSubst, pExpr->pRight); if( ExprUseXSelect(pExpr) ){ @@ -148852,6 +150788,7 @@ static void substSelect( SrcItem *pItem; int i; if( !p ) return; + pSubst->nSelDepth++; do{ substExprList(pSubst, p->pEList); substExprList(pSubst, p->pGroupBy); @@ -148869,6 +150806,7 @@ static void substSelect( } } }while( doPrior && (p = p->pPrior)!=0 ); + pSubst->nSelDepth--; } #endif /* !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW) */ @@ -149480,7 +151418,7 @@ static int flattenSubquery( ** complete, since there may still exist Expr.pTab entries that ** refer to the subquery even after flattening. Ticket #3346. ** - ** pSubitem->pTab is always non-NULL by test restrictions and tests above. + ** pSubitem->pSTab is always non-NULL by test restrictions and tests above. */ if( ALWAYS(pSubitem->pSTab!=0) ){ Table *pTabToDel = pSubitem->pSTab; @@ -149510,17 +151448,12 @@ static int flattenSubquery( pSub = pSub1; for(pParent=p; pParent; pParent=pParent->pPrior, pSub=pSub->pPrior){ int nSubSrc; - u8 jointype = 0; - u8 ltorj = pSrc->a[iFrom].fg.jointype & JT_LTORJ; + u8 jointype = pSubitem->fg.jointype; assert( pSub!=0 ); pSubSrc = pSub->pSrc; /* FROM clause of subquery */ nSubSrc = pSubSrc->nSrc; /* Number of terms in subquery FROM clause */ pSrc = pParent->pSrc; /* FROM clause of the outer query */ - if( pParent==p ){ - jointype = pSubitem->fg.jointype; /* First time through the loop */ - } - /* The subquery uses a single slot of the FROM clause of the outer ** query. 
If the subquery has more than one element in its FROM clause, ** then expand the outer query to make space for it to hold all elements @@ -149540,6 +151473,7 @@ static int flattenSubquery( pSrc = sqlite3SrcListEnlarge(pParse, pSrc, nSubSrc-1,iFrom+1); if( pSrc==0 ) break; pParent->pSrc = pSrc; + pSubitem = &pSrc->a[iFrom]; } /* Transfer the FROM clause terms from the subquery into the @@ -149554,11 +151488,10 @@ static int flattenSubquery( || pItem->u4.zDatabase==0 ); if( pItem->fg.isUsing ) sqlite3IdListDelete(db, pItem->u3.pUsing); *pItem = pSubSrc->a[i]; - pItem->fg.jointype |= ltorj; + pItem->fg.jointype |= (jointype & JT_LTORJ); memset(&pSubSrc->a[i], 0, sizeof(pSubSrc->a[i])); } - pSrc->a[iFrom].fg.jointype &= JT_LTORJ; - pSrc->a[iFrom].fg.jointype |= jointype | ltorj; + pSubitem->fg.jointype |= jointype; /* Now begin substituting subquery result set expressions for ** references to the iParent in the outer query. @@ -149610,6 +151543,7 @@ static int flattenSubquery( x.iTable = iParent; x.iNewTable = iNewParent; x.isOuterJoin = isOuterJoin; + x.nSelDepth = 0; x.pEList = pSub->pEList; x.pCList = findLeftmostExprlist(pSub); substSelect(&x, pParent, 0); @@ -150195,6 +152129,7 @@ static int pushDownWhereTerms( x.iTable = pSrc->iCursor; x.iNewTable = pSrc->iCursor; x.isOuterJoin = 0; + x.nSelDepth = 0; x.pEList = pSubq->pEList; x.pCList = findLeftmostExprlist(pSubq); pNew = substExpr(&x, pNew); @@ -150592,7 +152527,7 @@ SQLITE_PRIVATE With *sqlite3WithPush(Parse *pParse, With *pWith, u8 bFree){ ** CTE expression, through routine checks to see if the reference is ** a recursive reference to the CTE. ** -** If pFrom matches a CTE according to either of these two above, pFrom->pTab +** If pFrom matches a CTE according to either of these two above, pFrom->pSTab ** and other fields are populated accordingly. ** ** Return 0 if no match is found. 
@@ -151630,6 +153565,7 @@ static void resetAccumulator(Parse *pParse, AggInfo *pAggInfo){ if( pFunc->bOBPayload ){ /* extra columns for the function arguments */ assert( ExprUseXList(pFunc->pFExpr) ); + assert( pFunc->pFExpr->x.pList!=0 ); nExtra += pFunc->pFExpr->x.pList->nExpr; } if( pFunc->bUseSubtype ){ @@ -152219,6 +154155,193 @@ static int fromClauseTermCanBeCoroutine( return 1; } +/* +** Argument pWhere is the WHERE clause belonging to SELECT statement p. This +** function attempts to transform expressions of the form: +** +** EXISTS (SELECT ...) +** +** into joins. For example, given +** +** CREATE TABLE sailors(sid INTEGER PRIMARY KEY, name TEXT); +** CREATE TABLE reserves(sid INT, day DATE, PRIMARY KEY(sid, day)); +** +** SELECT name FROM sailors AS S WHERE EXISTS ( +** SELECT * FROM reserves AS R WHERE S.sid = R.sid AND R.day = '2022-10-25' +** ); +** +** the SELECT statement may be transformed as follows: +** +** SELECT name FROM sailors AS S, reserves AS R +** WHERE S.sid = R.sid AND R.day = '2022-10-25'; +** +** **Approximately**. Really, we have to ensure that the FROM-clause term +** that was formerly inside the EXISTS is only executed once. This is handled +** by setting the SrcItem.fg.fromExists flag, which then causes code in +** the where.c file to exit the corresponding loop after the first successful +** match (if any). 
+*/ +static SQLITE_NOINLINE void existsToJoin( + Parse *pParse, /* Parsing context */ + Select *p, /* The SELECT statement being optimized */ + Expr *pWhere /* part of the WHERE clause currently being examined */ +){ + if( pParse->nErr==0 + && pWhere!=0 + && !ExprHasProperty(pWhere, EP_OuterON|EP_InnerON) + && ALWAYS(p->pSrc!=0) + && p->pSrc->nSrcop==TK_AND ){ + Expr *pRight = pWhere->pRight; + existsToJoin(pParse, p, pWhere->pLeft); + existsToJoin(pParse, p, pRight); + } + else if( pWhere->op==TK_EXISTS ){ + Select *pSub = pWhere->x.pSelect; + Expr *pSubWhere = pSub->pWhere; + if( pSub->pSrc->nSrc==1 + && (pSub->selFlags & SF_Aggregate)==0 + && !pSub->pSrc->a[0].fg.isSubquery + && pSub->pLimit==0 + ){ + memset(pWhere, 0, sizeof(*pWhere)); + pWhere->op = TK_INTEGER; + pWhere->u.iValue = 1; + ExprSetProperty(pWhere, EP_IntValue); + + assert( p->pWhere!=0 ); + pSub->pSrc->a[0].fg.fromExists = 1; + pSub->pSrc->a[0].fg.jointype |= JT_CROSS; + p->pSrc = sqlite3SrcListAppendList(pParse, p->pSrc, pSub->pSrc); + if( pSubWhere ){ + p->pWhere = sqlite3PExpr(pParse, TK_AND, p->pWhere, pSubWhere); + pSub->pWhere = 0; + } + pSub->pSrc = 0; + sqlite3ParserAddCleanup(pParse, sqlite3SelectDeleteGeneric, pSub); +#if TREETRACE_ENABLED + if( sqlite3TreeTrace & 0x100000 ){ + TREETRACE(0x100000,pParse,p, + ("After EXISTS-to-JOIN optimization:\n")); + sqlite3TreeViewSelect(0, p, 0); + } +#endif + existsToJoin(pParse, p, pSubWhere); + } + } + } +} + +/* +** Type used for Walker callbacks by selectCheckOnClauses(). +*/ +typedef struct CheckOnCtx CheckOnCtx; +struct CheckOnCtx { + SrcList *pSrc; /* SrcList for this context */ + int iJoin; /* Cursor numbers must be =< than this */ + CheckOnCtx *pParent; /* Parent context */ +}; + +/* +** True if the SrcList passed as the only argument contains at least +** one RIGHT or FULL JOIN. False otherwise. +*/ +#define hasRightJoin(pSrc) (((pSrc)->a[0].fg.jointype & JT_LTORJ)!=0) + +/* +** The xExpr callback for the search of invalid ON clause terms. 
+*/ +static int selectCheckOnClausesExpr(Walker *pWalker, Expr *pExpr){ + CheckOnCtx *pCtx = pWalker->u.pCheckOnCtx; + + /* Check if pExpr is root or near-root of an ON clause constraint that needs + ** to be checked to ensure that it does not refer to tables in its FROM + ** clause to the right of itself. i.e. it is either: + ** + ** + an ON clause on an OUTER join, or + ** + an ON clause on an INNER join within a FROM that features at + ** least one RIGHT or FULL join. + */ + if( (ExprHasProperty(pExpr, EP_OuterON)) + || (ExprHasProperty(pExpr, EP_InnerON) && hasRightJoin(pCtx->pSrc)) + ){ + /* If CheckOnCtx.iJoin is already set, then fall through and process + ** this expression node as normal. Or, if CheckOnCtx.iJoin is still 0, + ** set it to the cursor number of the RHS of the join to which this + ** ON expression was attached and then iterate through the entire + ** expression. */ + assert( pCtx->iJoin==0 || pCtx->iJoin==pExpr->w.iJoin ); + if( pCtx->iJoin==0 ){ + pCtx->iJoin = pExpr->w.iJoin; + sqlite3WalkExprNN(pWalker, pExpr); + pCtx->iJoin = 0; + return WRC_Prune; + } + } + + if( pExpr->op==TK_COLUMN ){ + /* A column expression. Find the SrcList (if any) to which it refers. + ** Then, if CheckOnCtx.iJoin indicates that this expression is part of an + ** ON clause from that SrcList (i.e. if iJoin is non-zero), check that it + ** does not refer to a table to the right of CheckOnCtx.iJoin. */ + do { + SrcList *pSrc = pCtx->pSrc; + int iTab = pExpr->iTable; + if( iTab>=pSrc->a[0].iCursor && iTab<=pSrc->a[pSrc->nSrc-1].iCursor ){ + if( pCtx->iJoin && iTab>pCtx->iJoin ){ + sqlite3ErrorMsg(pWalker->pParse, + "ON clause references tables to its right"); + return WRC_Abort; + } + break; + } + pCtx = pCtx->pParent; + }while( pCtx ); + } + return WRC_Continue; +} + +/* +** The xSelect callback for the search of invalid ON clause terms. 
+*/ +static int selectCheckOnClausesSelect(Walker *pWalker, Select *pSelect){ + CheckOnCtx *pCtx = pWalker->u.pCheckOnCtx; + if( pSelect->pSrc==pCtx->pSrc || pSelect->pSrc->nSrc==0 ){ + return WRC_Continue; + }else{ + CheckOnCtx sCtx; + memset(&sCtx, 0, sizeof(sCtx)); + sCtx.pSrc = pSelect->pSrc; + sCtx.pParent = pCtx; + pWalker->u.pCheckOnCtx = &sCtx; + sqlite3WalkSelect(pWalker, pSelect); + pWalker->u.pCheckOnCtx = pCtx; + pSelect->selFlags &= ~SF_OnToWhere; + return WRC_Prune; + } +} + +/* +** Check all ON clauses in pSelect to verify that they do not reference +** columns to the right. +*/ +static void selectCheckOnClauses(Parse *pParse, Select *pSelect){ + Walker w; + CheckOnCtx sCtx; + assert( pSelect->selFlags & SF_OnToWhere ); + assert( pSelect->pSrc!=0 && pSelect->pSrc->nSrc>=2 ); + memset(&w, 0, sizeof(w)); + w.pParse = pParse; + w.xExprCallback = selectCheckOnClausesExpr; + w.xSelectCallback = selectCheckOnClausesSelect; + w.u.pCheckOnCtx = &sCtx; + memset(&sCtx, 0, sizeof(sCtx)); + sCtx.pSrc = pSelect->pSrc; + sqlite3WalkExprNN(&w, pSelect->pWhere); + pSelect->selFlags &= ~SF_OnToWhere; +} + /* ** Generate byte-code for the SELECT statement given in the p argument. ** @@ -152346,6 +154469,18 @@ SQLITE_PRIVATE int sqlite3Select( } #endif + /* If the SELECT statement contains ON clauses that were moved into + ** the WHERE clause, go through and verify that none of the terms + ** in the ON clauses reference tables to the right of the ON clause. + ** Do this now, after name resolution, but before query flattening + */ + if( p->selFlags & SF_OnToWhere ){ + selectCheckOnClauses(pParse, p); + if( pParse->nErr ){ + goto select_end; + } + } + /* If the SF_UFSrcCheck flag is set, then this function is being called ** as part of populating the temp table for an UPDATE...FROM statement. 
** In this case, it is an error if the target object (pSrc->a[0]) name @@ -152587,6 +154722,13 @@ SQLITE_PRIVATE int sqlite3Select( } #endif + /* If there may be an "EXISTS (SELECT ...)" in the WHERE clause, attempt + ** to change it into a join. */ + if( pParse->bHasExists && OptimizationEnabled(db,SQLITE_ExistsToJoin) ){ + existsToJoin(pParse, p, p->pWhere); + pTabList = p->pSrc; + } + /* Do the WHERE-clause constant propagation optimization if this is ** a join. No need to spend time on this operation for non-join queries ** as the equivalent optimization will be handled by query planner in @@ -153374,12 +155516,12 @@ SQLITE_PRIVATE int sqlite3Select( ** for the next GROUP BY batch. */ sqlite3VdbeAddOp2(v, OP_Gosub, regOutputRow, addrOutputRow); - VdbeComment((v, "output one row")); + VdbeComment((v, "output one row of %d", p->selId)); sqlite3ExprCodeMove(pParse, iBMem, iAMem, pGroupBy->nExpr); sqlite3VdbeAddOp2(v, OP_IfPos, iAbortFlag, addrEnd); VdbeCoverage(v); VdbeComment((v, "check abort flag")); sqlite3VdbeAddOp2(v, OP_Gosub, regReset, addrReset); - VdbeComment((v, "reset accumulator")); + VdbeComment((v, "reset accumulator %d", p->selId)); /* Update the aggregate accumulators based on the content of ** the current row @@ -153387,7 +155529,7 @@ SQLITE_PRIVATE int sqlite3Select( sqlite3VdbeJumpHere(v, addr1); updateAccumulator(pParse, iUseFlag, pAggInfo, eDist); sqlite3VdbeAddOp2(v, OP_Integer, 1, iUseFlag); - VdbeComment((v, "indicate data in accumulator")); + VdbeComment((v, "indicate data in accumulator %d", p->selId)); /* End of the loop */ @@ -153404,7 +155546,7 @@ SQLITE_PRIVATE int sqlite3Select( /* Output the final row of result */ sqlite3VdbeAddOp2(v, OP_Gosub, regOutputRow, addrOutputRow); - VdbeComment((v, "output final row")); + VdbeComment((v, "output final row of %d", p->selId)); /* Jump over the subroutines */ @@ -153425,7 +155567,7 @@ SQLITE_PRIVATE int sqlite3Select( addrOutputRow = sqlite3VdbeCurrentAddr(v); sqlite3VdbeAddOp2(v, OP_IfPos, 
iUseFlag, addrOutputRow+2); VdbeCoverage(v); - VdbeComment((v, "Groupby result generator entry point")); + VdbeComment((v, "Groupby result generator entry point %d", p->selId)); sqlite3VdbeAddOp1(v, OP_Return, regOutputRow); finalizeAggFunctions(pParse, pAggInfo); sqlite3ExprIfFalse(pParse, pHaving, addrOutputRow+1, SQLITE_JUMPIFNULL); @@ -153433,14 +155575,14 @@ SQLITE_PRIVATE int sqlite3Select( &sDistinct, pDest, addrOutputRow+1, addrSetAbort); sqlite3VdbeAddOp1(v, OP_Return, regOutputRow); - VdbeComment((v, "end groupby result generator")); + VdbeComment((v, "end groupby result generator %d", p->selId)); /* Generate a subroutine that will reset the group-by accumulator */ sqlite3VdbeResolveLabel(v, addrReset); resetAccumulator(pParse, pAggInfo); sqlite3VdbeAddOp2(v, OP_Integer, 0, iUseFlag); - VdbeComment((v, "indicate accumulator empty")); + VdbeComment((v, "indicate accumulator %d empty", p->selId)); sqlite3VdbeAddOp1(v, OP_Return, regReset); if( distFlag!=0 && eDist!=WHERE_DISTINCT_NOOP ){ @@ -154904,7 +157046,10 @@ static void codeReturningTrigger( Returning *pReturning; Select sSelect; SrcList *pFrom; - u8 fromSpace[SZ_SRCLIST_1]; + union { + SrcList sSrc; + u8 fromSpace[SZ_SRCLIST_1]; + } uSrc; assert( v!=0 ); if( !pParse->bReturning ){ @@ -154920,8 +157065,8 @@ static void codeReturningTrigger( return; } memset(&sSelect, 0, sizeof(sSelect)); - pFrom = (SrcList*)fromSpace; - memset(pFrom, 0, SZ_SRCLIST_1); + memset(&uSrc, 0, sizeof(uSrc)); + pFrom = &uSrc.sSrc; sSelect.pEList = sqlite3ExprListDup(db, pReturning->pReturnEL, 0); sSelect.pSrc = pFrom; pFrom->nSrc = 1; @@ -157328,7 +159473,8 @@ SQLITE_PRIVATE SQLITE_NOINLINE int sqlite3RunVacuum( saved_nChange = db->nChange; saved_nTotalChange = db->nTotalChange; saved_mTrace = db->mTrace; - db->flags |= SQLITE_WriteSchema | SQLITE_IgnoreChecks | SQLITE_Comments; + db->flags |= SQLITE_WriteSchema | SQLITE_IgnoreChecks | SQLITE_Comments + | SQLITE_AttachCreate | SQLITE_AttachWrite; db->mDbFlags |= 
DBFLAG_PreferBuiltin | DBFLAG_Vacuum; db->flags &= ~(u64)(SQLITE_ForeignKeys | SQLITE_ReverseOrder | SQLITE_Defensive | SQLITE_CountRows); @@ -158833,9 +160979,12 @@ SQLITE_PRIVATE int sqlite3VtabEponymousTableInit(Parse *pParse, Module *pMod){ addModuleArgument(pParse, pTab, sqlite3DbStrDup(db, pTab->zName)); addModuleArgument(pParse, pTab, 0); addModuleArgument(pParse, pTab, sqlite3DbStrDup(db, pTab->zName)); + db->nSchemaLock++; rc = vtabCallConstructor(db, pTab, pMod, pModule->xConnect, &zErr); + db->nSchemaLock--; if( rc ){ sqlite3ErrorMsg(pParse, "%s", zErr); + pParse->rc = rc; sqlite3DbFree(db, zErr); sqlite3VtabEponymousTableClear(db, pMod); } @@ -159031,6 +161180,7 @@ struct WhereLevel { int iTabCur; /* The VDBE cursor used to access the table */ int iIdxCur; /* The VDBE cursor used to access pIdx */ int addrBrk; /* Jump here to break out of the loop */ + int addrHalt; /* Abort the query due to empty table or similar */ int addrNxt; /* Jump here to start the next IN combination */ int addrSkip; /* Jump here for next iteration of skip-scan */ int addrCont; /* Jump here to continue with the next loop cycle */ @@ -159236,6 +161386,9 @@ struct WhereTerm { u8 eMatchOp; /* Op for vtab MATCH/LIKE/GLOB/REGEXP terms */ int iParent; /* Disable pWC->a[iParent] when this term disabled */ int leftCursor; /* Cursor number of X in "X " */ +#ifdef SQLITE_DEBUG + int iTerm; /* Which WhereTerm is this, for debug purposes */ +#endif union { struct { int leftColumn; /* Column number of X in "X " */ @@ -159728,7 +161881,6 @@ SQLITE_PRIVATE void sqlite3WhereAddExplainText( #endif { VdbeOp *pOp = sqlite3VdbeGetOp(pParse->pVdbe, addr); - SrcItem *pItem = &pTabList->a[pLevel->iFrom]; sqlite3 *db = pParse->db; /* Database handle */ int isSearch; /* True for a SEARCH. False for SCAN. 
*/ @@ -159751,7 +161903,10 @@ SQLITE_PRIVATE void sqlite3WhereAddExplainText( sqlite3StrAccumInit(&str, db, zBuf, sizeof(zBuf), SQLITE_MAX_LENGTH); str.printfFlags = SQLITE_PRINTF_INTERNAL; - sqlite3_str_appendf(&str, "%s %S", isSearch ? "SEARCH" : "SCAN", pItem); + sqlite3_str_appendf(&str, "%s %S%s", + isSearch ? "SEARCH" : "SCAN", + pItem, + pItem->fg.fromExists ? " EXISTS" : ""); if( (flags & (WHERE_IPK|WHERE_VIRTUALTABLE))==0 ){ const char *zFmt = 0; Index *pIdx; @@ -160995,6 +163150,7 @@ static SQLITE_NOINLINE void filterPullDown( int addrNxt, /* Jump here to bypass inner loops */ Bitmask notReady /* Loops that are not ready */ ){ + int saved_addrBrk; while( ++iLevel < pWInfo->nLevel ){ WhereLevel *pLevel = &pWInfo->a[iLevel]; WhereLoop *pLoop = pLevel->pWLoop; @@ -161003,7 +163159,7 @@ static SQLITE_NOINLINE void filterPullDown( /* ,--- Because sqlite3ConstructBloomFilter() has will not have set ** vvvvv--' pLevel->regFilter if this were true. */ if( NEVER(pLoop->prereq & notReady) ) continue; - assert( pLevel->addrBrk==0 ); + saved_addrBrk = pLevel->addrBrk; pLevel->addrBrk = addrNxt; if( pLoop->wsFlags & WHERE_IPK ){ WhereTerm *pTerm = pLoop->aLTerm[0]; @@ -161033,7 +163189,7 @@ static SQLITE_NOINLINE void filterPullDown( VdbeCoverage(pParse->pVdbe); } pLevel->regFilter = 0; - pLevel->addrBrk = 0; + pLevel->addrBrk = saved_addrBrk; } } @@ -161080,7 +163236,6 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( sqlite3 *db; /* Database connection */ SrcItem *pTabItem; /* FROM clause term being coded */ int addrBrk; /* Jump here to break out of the loop */ - int addrHalt; /* addrBrk for the outermost loop */ int addrCont; /* Jump here to continue with next cycle */ int iRowidReg = 0; /* Rowid is stored in this register, if not zero */ int iReleaseReg = 0; /* Temp register to free before returning */ @@ -161124,7 +163279,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( ** there are no IN operators in the constraints, the "addrNxt" label ** is the 
same as "addrBrk". */ - addrBrk = pLevel->addrBrk = pLevel->addrNxt = sqlite3VdbeMakeLabel(pParse); + addrBrk = pLevel->addrNxt = pLevel->addrBrk; addrCont = pLevel->addrCont = sqlite3VdbeMakeLabel(pParse); /* If this is the right table of a LEFT OUTER JOIN, allocate and @@ -161140,14 +163295,6 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( VdbeComment((v, "init LEFT JOIN match flag")); } - /* Compute a safe address to jump to if we discover that the table for - ** this loop is empty and can never contribute content. */ - for(j=iLevel; j>0; j--){ - if( pWInfo->a[j].iLeftJoin ) break; - if( pWInfo->a[j].pRJ ) break; - } - addrHalt = pWInfo->a[j].addrBrk; - /* Special case of a FROM clause subquery implemented as a co-routine */ if( pTabItem->fg.viaCoroutine ){ int regYield; @@ -161386,7 +163533,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( VdbeCoverageIf(v, pX->op==TK_GE); sqlite3ReleaseTempReg(pParse, rTemp); }else{ - sqlite3VdbeAddOp2(v, bRev ? OP_Last : OP_Rewind, iCur, addrHalt); + sqlite3VdbeAddOp2(v, bRev ? OP_Last : OP_Rewind, iCur, pLevel->addrHalt); VdbeCoverageIf(v, bRev==0); VdbeCoverageIf(v, bRev!=0); } @@ -161426,36 +163573,36 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( sqlite3VdbeChangeP5(v, SQLITE_AFF_NUMERIC | SQLITE_JUMPIFNULL); } }else if( pLoop->wsFlags & WHERE_INDEXED ){ - /* Case 4: A scan using an index. + /* Case 4: Search using an index. ** - ** The WHERE clause may contain zero or more equality - ** terms ("==" or "IN" operators) that refer to the N - ** left-most columns of the index. It may also contain - ** inequality constraints (>, <, >= or <=) on the indexed - ** column that immediately follows the N equalities. Only - ** the right-most column can be an inequality - the rest must - ** use the "==" and "IN" operators. 
For example, if the - ** index is on (x,y,z), then the following clauses are all - ** optimized: + ** The WHERE clause may contain zero or more equality + ** terms ("==" or "IN" or "IS" operators) that refer to the N + ** left-most columns of the index. It may also contain + ** inequality constraints (>, <, >= or <=) on the indexed + ** column that immediately follows the N equalities. Only + ** the right-most column can be an inequality - the rest must + ** use the "==", "IN", or "IS" operators. For example, if the + ** index is on (x,y,z), then the following clauses are all + ** optimized: ** - ** x=5 - ** x=5 AND y=10 - ** x=5 AND y<10 - ** x=5 AND y>5 AND y<10 - ** x=5 AND y=5 AND z<=10 + ** x=5 + ** x=5 AND y=10 + ** x=5 AND y<10 + ** x=5 AND y>5 AND y<10 + ** x=5 AND y=5 AND z<=10 ** - ** The z<10 term of the following cannot be used, only - ** the x=5 term: + ** The z<10 term of the following cannot be used, only + ** the x=5 term: ** - ** x=5 AND z<10 + ** x=5 AND z<10 ** - ** N may be zero if there are inequality constraints. - ** If there are no inequality constraints, then N is at - ** least one. + ** N may be zero if there are inequality constraints. + ** If there are no inequality constraints, then N is at + ** least one. ** - ** This case is also used when there are no WHERE clause - ** constraints but an index is selected anyway, in order - ** to force the output order to conform to an ORDER BY. + ** This case is also used when there are no WHERE clause + ** constraints but an index is selected anyway, in order + ** to force the output order to conform to an ORDER BY. 
*/ static const u8 aStartOp[] = { 0, @@ -162181,7 +164328,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( codeCursorHint(pTabItem, pWInfo, pLevel, 0); pLevel->op = aStep[bRev]; pLevel->p1 = iCur; - pLevel->p2 = 1 + sqlite3VdbeAddOp2(v, aStart[bRev], iCur, addrHalt); + pLevel->p2 = 1 + sqlite3VdbeAddOp2(v, aStart[bRev],iCur,pLevel->addrHalt); VdbeCoverageIf(v, bRev==0); VdbeCoverageIf(v, bRev!=0); pLevel->p5 = SQLITE_STMTSTATUS_FULLSCAN_STEP; @@ -162453,7 +164600,10 @@ SQLITE_PRIVATE SQLITE_NOINLINE void sqlite3WhereRightJoinLoop( WhereLoop *pLoop = pLevel->pWLoop; SrcItem *pTabItem = &pWInfo->pTabList->a[pLevel->iFrom]; SrcList *pFrom; - u8 fromSpace[SZ_SRCLIST_1]; + union { + SrcList sSrc; + u8 fromSpace[SZ_SRCLIST_1]; + } uSrc; Bitmask mAll = 0; int k; @@ -162497,7 +164647,7 @@ SQLITE_PRIVATE SQLITE_NOINLINE void sqlite3WhereRightJoinLoop( sqlite3ExprDup(pParse->db, pTerm->pExpr, 0)); } } - pFrom = (SrcList*)fromSpace; + pFrom = &uSrc.sSrc; pFrom->nSrc = 1; pFrom->nAlloc = 1; memcpy(&pFrom->a[0], pTabItem, sizeof(SrcItem)); @@ -163492,7 +165642,7 @@ static int termIsEquivalence(Parse *pParse, Expr *pExpr, SrcList *pSrc){ if( ExprHasProperty(pExpr, EP_OuterON) ) return 0; /* (3) */ assert( pSrc!=0 ); if( pExpr->op==TK_IS - && pSrc->nSrc + && pSrc->nSrc>=2 && (pSrc->a[0].fg.jointype & JT_LTORJ)!=0 ){ return 0; /* (4) */ @@ -163668,6 +165818,9 @@ static void exprAnalyze( } assert( pWC->nTerm > idxTerm ); pTerm = &pWC->a[idxTerm]; +#ifdef SQLITE_DEBUG + pTerm->iTerm = idxTerm; +#endif pMaskSet = &pWInfo->sMaskSet; pExpr = pTerm->pExpr; assert( pExpr!=0 ); /* Because malloc() has not failed */ @@ -163711,21 +165864,7 @@ static void exprAnalyze( prereqAll |= x; extraRight = x-1; /* ON clause terms may not be used with an index ** on left table of a LEFT JOIN. 
Ticket #3015 */ - if( (prereqAll>>1)>=x ){ - sqlite3ErrorMsg(pParse, "ON clause references tables to its right"); - return; - } }else if( (prereqAll>>1)>=x ){ - /* The ON clause of an INNER JOIN references a table to its right. - ** Most other SQL database engines raise an error. But SQLite versions - ** 3.0 through 3.38 just put the ON clause constraint into the WHERE - ** clause and carried on. Beginning with 3.39, raise an error only - ** if there is a RIGHT or FULL JOIN in the query. This makes SQLite - ** more like other systems, and also preserves legacy. */ - if( ALWAYS(pSrc->nSrc>0) && (pSrc->a[0].fg.jointype & JT_LTORJ)!=0 ){ - sqlite3ErrorMsg(pParse, "ON clause references tables to its right"); - return; - } ExprClearProperty(pExpr, EP_InnerON); } } @@ -164082,7 +166221,7 @@ static void exprAnalyze( idxNew = whereClauseInsert(pWC, pNewExpr, TERM_VIRTUAL|TERM_DYNAMIC); testcase( idxNew==0 ); pNewTerm = &pWC->a[idxNew]; - pNewTerm->prereqRight = prereqExpr; + pNewTerm->prereqRight = prereqExpr | extraRight; pNewTerm->leftCursor = pLeft->iTable; pNewTerm->u.x.leftColumn = pLeft->iColumn; pNewTerm->eOperator = WO_AUX; @@ -164193,7 +166332,7 @@ static void whereAddLimitExpr( ** ** 1. The SELECT statement has a LIMIT clause, and ** 2. The SELECT statement is not an aggregate or DISTINCT query, and -** 3. The SELECT statement has exactly one object in its from clause, and +** 3. The SELECT statement has exactly one object in its FROM clause, and ** that object is a virtual table, and ** 4. There are no terms in the WHERE clause that will not be passed ** to the virtual table xBestIndex method. @@ -164230,8 +166369,22 @@ SQLITE_PRIVATE void SQLITE_NOINLINE sqlite3WhereAddLimit(WhereClause *pWC, Selec ** (leftCursor==iCsr) test below. 
*/ continue; } - if( pWC->a[ii].leftCursor!=iCsr ) return; - if( pWC->a[ii].prereqRight!=0 ) return; + if( pWC->a[ii].leftCursor==iCsr && pWC->a[ii].prereqRight==0 ) continue; + + /* If this term has a parent with exactly one child, and the parent will + ** be passed through to xBestIndex, then this term can be ignored. */ + if( pWC->a[ii].iParent>=0 ){ + WhereTerm *pParent = &pWC->a[ pWC->a[ii].iParent ]; + if( pParent->leftCursor==iCsr + && pParent->prereqRight==0 + && pParent->nChild==1 + ){ + continue; + } + } + + /* This term will not be passed through. Do not add a LIMIT clause. */ + return; } /* Check condition (5). Return early if it is not met. */ @@ -164895,11 +167048,11 @@ static WhereTerm *whereScanNext(WhereScan *pScan){ pScan->pWC = pWC; pScan->k = k+1; #ifdef WHERETRACE_ENABLED - if( sqlite3WhereTrace & 0x20000 ){ + if( (sqlite3WhereTrace & 0x20000)!=0 && pScan->nEquiv>1 ){ int ii; - sqlite3DebugPrintf("SCAN-TERM %p: nEquiv=%d", - pTerm, pScan->nEquiv); - for(ii=0; iinEquiv; ii++){ + sqlite3DebugPrintf("EQUIVALENT TO {%d:%d} (due to TERM-%d):", + pScan->aiCur[0], pScan->aiColumn[0], pTerm->iTerm); + for(ii=1; iinEquiv; ii++){ sqlite3DebugPrintf(" {%d:%d}", pScan->aiCur[ii], pScan->aiColumn[ii]); } @@ -165670,7 +167823,9 @@ static SQLITE_NOINLINE void constructAutomaticIndex( VdbeCoverage(v); VdbeComment((v, "next row of %s", pSrc->pSTab->zName)); }else{ - addrTop = sqlite3VdbeAddOp1(v, OP_Rewind, pLevel->iTabCur); VdbeCoverage(v); + assert( pLevel->addrHalt ); + addrTop = sqlite3VdbeAddOp2(v, OP_Rewind,pLevel->iTabCur,pLevel->addrHalt); + VdbeCoverage(v); } if( pPartial ){ iContinue = sqlite3VdbeMakeLabel(pParse); @@ -165698,11 +167853,14 @@ static SQLITE_NOINLINE void constructAutomaticIndex( pSrc->u4.pSubq->regResult, pLevel->iIdxCur); sqlite3VdbeGoto(v, addrTop); pSrc->fg.viaCoroutine = 0; + sqlite3VdbeJumpHere(v, addrTop); }else{ sqlite3VdbeAddOp2(v, OP_Next, pLevel->iTabCur, addrTop+1); VdbeCoverage(v); sqlite3VdbeChangeP5(v, 
SQLITE_STMTSTATUS_AUTOINDEX); + if( (pSrc->fg.jointype & JT_LEFT)!=0 ){ + sqlite3VdbeJumpHere(v, addrTop); + } } - sqlite3VdbeJumpHere(v, addrTop); sqlite3ReleaseTempReg(pParse, regRecord); /* Jump here when skipping the initialization */ @@ -166854,6 +169012,7 @@ SQLITE_PRIVATE void sqlite3WhereTermPrint(WhereTerm *pTerm, int iTerm){ }else{ sqlite3_snprintf(sizeof(zLeft),zLeft,"left=%d", pTerm->leftCursor); } + iTerm = pTerm->iTerm = MAX(iTerm,pTerm->iTerm); sqlite3DebugPrintf( "TERM-%-3d %p %s %-12s op=%03x wtFlags=%04x", iTerm, pTerm, zType, zLeft, pTerm->eOperator, pTerm->wtFlags); @@ -167995,6 +170154,7 @@ static int whereLoopAddBtreeIndex( && pProbe->hasStat1!=0 && OptimizationEnabled(db, SQLITE_SkipScan) && pProbe->aiRowLogEst[saved_nEq+1]>=42 /* TUNING: Minimum for skip-scan */ + && pSrc->fg.fromExists==0 && (rc = whereLoopResize(db, pNew, pNew->nLTerm+1))==SQLITE_OK ){ LogEst nIter; @@ -169566,6 +171726,10 @@ static i8 wherePathSatisfiesOrderBy( && ((wctrlFlags&(WHERE_DISTINCTBY|WHERE_SORTBYGROUP))!=WHERE_DISTINCTBY) ){ obSat = obDone; + }else{ + /* No further ORDER BY terms may be matched. So this call should + ** return >=0, not -1. Clear isOrderDistinct to ensure it does so. */ + isOrderDistinct = 0; } break; } @@ -170311,8 +172475,15 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){ ** mxChoice best-so-far paths. ** ** First look for an existing path among best-so-far paths - ** that covers the same set of loops and has the same isOrdered - ** setting as the current path candidate. + ** that: + ** (1) covers the same set of loops, and + ** (2) has a compatible isOrdered value. + ** + ** "Compatible isOrdered value" means either + ** (A) both have isOrdered==-1, or + ** (B) both have isOrder>=0, or + ** (C) ordering does not matter because this is the last round + ** of the solver. 
** ** The term "((pTo->isOrdered^isOrdered)&0x80)==0" is equivalent ** to (pTo->isOrdered==(-1))==(isOrdered==(-1))" for the range @@ -170321,7 +172492,7 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){ testcase( nTo==0 ); for(jj=0, pTo=aTo; jjmaskLoop==maskNew - && ((pTo->isOrdered^isOrdered)&0x80)==0 + && ( ((pTo->isOrdered^isOrdered)&0x80)==0 || iLoop==nLoop-1 ) ){ testcase( jj==nTo-1 ); break; @@ -170476,11 +172647,10 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){ return SQLITE_ERROR; } - /* Find the lowest cost path. pFrom will be left pointing to that path */ + /* Only one path is available, which is the best path */ + assert( nFrom==1 ); pFrom = aFrom; - for(ii=1; iirCost>aFrom[ii].rCost ) pFrom = &aFrom[ii]; - } + assert( pWInfo->nLevel==nLoop ); /* Load the lowest cost path into pWInfo */ for(iLoop=0; iLoopnLevel; i++){ WhereLoop *p = pWInfo->a[i].pWLoop; if( p==0 ) break; - if( (p->wsFlags & WHERE_VIRTUALTABLE)!=0 ) continue; + if( (p->wsFlags & WHERE_VIRTUALTABLE)!=0 ){ + /* Treat a vtab scan as similar to a full-table scan */ + break; + } if( (p->wsFlags & (WHERE_COLUMN_EQ|WHERE_COLUMN_NULL|WHERE_COLUMN_IN))!=0 ){ u8 iTab = p->iTab; WhereLoop *pLoop; @@ -171551,6 +173724,14 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin( pTab = pTabItem->pSTab; iDb = sqlite3SchemaToIndex(db, pTab->pSchema); pLoop = pLevel->pWLoop; + pLevel->addrBrk = sqlite3VdbeMakeLabel(pParse); + if( ii==0 || (pTabItem[0].fg.jointype & JT_LEFT)!=0 ){ + pLevel->addrHalt = pLevel->addrBrk; + }else if( pWInfo->a[ii-1].pRJ ){ + pLevel->addrHalt = pWInfo->a[ii-1].addrBrk; + }else{ + pLevel->addrHalt = pWInfo->a[ii-1].addrHalt; + } if( (pTab->tabFlags & TF_Ephemeral)!=0 || IsView(pTab) ){ /* Do nothing */ }else @@ -171602,6 +173783,13 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin( sqlite3VdbeAddOp4Dup8(v, OP_ColumnsUsed, pTabItem->iCursor, 0, 0, (const u8*)&pTabItem->colUsed, P4_INT64); #endif + if( ii>=2 + && (pTabItem[0].fg.jointype & 
(JT_LTORJ|JT_LEFT))==0 + && pLevel->addrHalt==pWInfo->a[0].addrHalt + ){ + sqlite3VdbeAddOp2(v, OP_IfEmpty, pTabItem->iCursor, pWInfo->iBreak); + VdbeCoverage(v); + } }else{ sqlite3TableLock(pParse, iDb, pTab->tnum, 0, pTab->zName); } @@ -171858,6 +174046,23 @@ SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo *pWInfo){ sqlite3VdbeAddOp2(v, OP_Goto, 1, pLevel->p2); } #endif /* SQLITE_DISABLE_SKIPAHEAD_DISTINCT */ + if( pTabList->a[pLevel->iFrom].fg.fromExists && i==pWInfo->nLevel-1 ){ + /* If the EXISTS-to-JOIN optimization was applied, then the EXISTS + ** loop(s) will be the inner-most loops of the join. There might be + ** multiple EXISTS loops, but they will all be nested, and the join + ** order will not have been changed by the query planner. If the + ** inner-most EXISTS loop sees a single successful row, it should + ** break out of *all* EXISTS loops. But only the inner-most of the + ** nested EXISTS loops should do this breakout. */ + int nOuter = 0; /* Nr of outer EXISTS that this one is nested within */ + while( nOutera[pLevel[-nOuter-1].iFrom].fg.fromExists ) break; + nOuter++; + } + testcase( nOuter>0 ); + sqlite3VdbeAddOp2(v, OP_Goto, 0, pLevel[-nOuter].addrBrk); + VdbeComment((v, "EXISTS break")); + } /* The common case: Advance to the next row */ if( pLevel->addrCont ) sqlite3VdbeResolveLabel(v, pLevel->addrCont); sqlite3VdbeAddOp3(v, pLevel->op, pLevel->p1, pLevel->p2, pLevel->p3); @@ -174708,7 +176913,7 @@ static int windowExprGtZero(Parse *pParse, Expr *pExpr){ ** ** ROWS BETWEEN FOLLOWING AND FOLLOWING ** -** ... loop started by sqlite3WhereBegin() ... +** ... loop started by sqlite3WhereBegin() ... 
** if( new partition ){ ** Gosub flush ** } @@ -175226,6 +177431,12 @@ SQLITE_PRIVATE void sqlite3WindowCodeStep( addrBreak2 = windowCodeOp(&s, WINDOW_AGGINVERSE, 0, 1); }else{ assert( pMWin->eEnd==TK_FOLLOWING ); + /* assert( regStart>=0 ); + ** regEnd = regEnd - regStart; + ** regStart = 0; */ + sqlite3VdbeAddOp3(v, OP_Subtract, regStart, regEnd, regEnd); + sqlite3VdbeAddOp2(v, OP_Integer, 0, regStart); + addrStart = sqlite3VdbeCurrentAddr(v); addrBreak1 = windowCodeOp(&s, WINDOW_RETURN_ROW, regEnd, 1); addrBreak2 = windowCodeOp(&s, WINDOW_AGGINVERSE, regStart, 1); @@ -181621,8 +183832,9 @@ static int analyzeFilterKeyword(const unsigned char *z, int lastToken){ ** Return the length (in bytes) of the token that begins at z[0]. ** Store the token type in *tokenType before returning. */ -SQLITE_PRIVATE int sqlite3GetToken(const unsigned char *z, int *tokenType){ - int i, c; +SQLITE_PRIVATE i64 sqlite3GetToken(const unsigned char *z, int *tokenType){ + i64 i; + int c; switch( aiClass[*z] ){ /* Switch on the character-class of the first byte ** of the token. See the comment on the CC_ defines ** above. 
*/ @@ -181950,7 +184162,7 @@ SQLITE_PRIVATE int sqlite3GetToken(const unsigned char *z, int *tokenType){ SQLITE_PRIVATE int sqlite3RunParser(Parse *pParse, const char *zSql){ int nErr = 0; /* Number of errors encountered */ void *pEngine; /* The LEMON-generated LALR(1) parser */ - int n = 0; /* Length of the next token token */ + i64 n = 0; /* Length of the next token token */ int tokenType; /* type of the next token */ int lastTokenParsed = -1; /* type of the previous token */ sqlite3 *db = pParse->db; /* The database connection */ @@ -182053,13 +184265,13 @@ SQLITE_PRIVATE int sqlite3RunParser(Parse *pParse, const char *zSql){ }else if( tokenType!=TK_QNUMBER ){ Token x; x.z = zSql; - x.n = n; + x.n = (u32)n; sqlite3ErrorMsg(pParse, "unrecognized token: \"%T\"", &x); break; } } pParse->sLastToken.z = zSql; - pParse->sLastToken.n = n; + pParse->sLastToken.n = (u32)n; sqlite3Parser(pEngine, tokenType, pParse->sLastToken); lastTokenParsed = tokenType; zSql += n; @@ -182135,7 +184347,7 @@ SQLITE_PRIVATE char *sqlite3Normalize( ){ sqlite3 *db; /* The database connection */ int i; /* Next unread byte of zSql[] */ - int n; /* length of current token */ + i64 n; /* length of current token */ int tokenType; /* type of current token */ int prevType = 0; /* Previous non-whitespace token */ int nParen; /* Number of nested levels of parentheses */ @@ -182713,9 +184925,6 @@ static int (*const sqlite3BuiltinExtensions[])(sqlite3*) = { sqlite3DbstatRegister, #endif sqlite3TestExtInit, -#if !defined(SQLITE_OMIT_VIRTUALTABLE) && !defined(SQLITE_OMIT_JSON) - sqlite3JsonTableFunctions, -#endif #ifdef SQLITE_ENABLE_STMTVTAB sqlite3StmtVtabInit, #endif @@ -184036,6 +186245,7 @@ SQLITE_PRIVATE void sqlite3LeaveMutexAndCloseZombie(sqlite3 *db){ /* Clear the TEMP schema separately and last */ if( db->aDb[1].pSchema ){ sqlite3SchemaClear(db->aDb[1].pSchema); + assert( db->aDb[1].pSchema->trigHash.count==0 ); } sqlite3VtabUnlockList(db); @@ -184171,6 +186381,9 @@ SQLITE_PRIVATE const char 
*sqlite3ErrName(int rc){ case SQLITE_OK: zName = "SQLITE_OK"; break; case SQLITE_ERROR: zName = "SQLITE_ERROR"; break; case SQLITE_ERROR_SNAPSHOT: zName = "SQLITE_ERROR_SNAPSHOT"; break; + case SQLITE_ERROR_RETRY: zName = "SQLITE_ERROR_RETRY"; break; + case SQLITE_ERROR_MISSING_COLLSEQ: + zName = "SQLITE_ERROR_MISSING_COLLSEQ"; break; case SQLITE_INTERNAL: zName = "SQLITE_INTERNAL"; break; case SQLITE_PERM: zName = "SQLITE_PERM"; break; case SQLITE_ABORT: zName = "SQLITE_ABORT"; break; @@ -185352,6 +187565,29 @@ SQLITE_API const char *sqlite3_errmsg(sqlite3 *db){ return z; } +/* +** Set the error code and error message associated with the database handle. +** +** This routine is intended to be called by outside extensions (ex: the +** Session extension). Internal logic should invoke sqlite3Error() or +** sqlite3ErrorWithMsg() directly. +*/ +SQLITE_API int sqlite3_set_errmsg(sqlite3 *db, int errcode, const char *zMsg){ + int rc = SQLITE_OK; + if( !sqlite3SafetyCheckOk(db) ){ + return SQLITE_MISUSE_BKPT; + } + sqlite3_mutex_enter(db->mutex); + if( zMsg ){ + sqlite3ErrorWithMsg(db, errcode, "%s", zMsg); + }else{ + sqlite3Error(db, errcode); + } + rc = sqlite3ApiExit(db, rc); + sqlite3_mutex_leave(db->mutex); + return rc; +} + /* ** Return the byte offset of the most recent error */ @@ -187176,13 +189412,15 @@ SQLITE_API int sqlite3_test_control(int op, ...){ break; } - /* sqlite3_test_control(SQLITE_TESTCTRL_IMPOSTER, db, dbName, onOff, tnum); + /* sqlite3_test_control(SQLITE_TESTCTRL_IMPOSTER, db, dbName, mode, tnum); ** ** This test control is used to create imposter tables. "db" is a pointer ** to the database connection. dbName is the database name (ex: "main" or - ** "temp") which will receive the imposter. "onOff" turns imposter mode on - ** or off. "tnum" is the root page of the b-tree to which the imposter - ** table should connect. + ** "temp") which will receive the imposter. "mode" turns imposter mode on + ** or off. mode==0 means imposter mode is off. 
mode==1 means imposter mode + ** is on. mode==2 means imposter mode is on but results in an imposter + ** table that is read-only unless writable_schema is on. "tnum" is the + ** root page of the b-tree to which the imposter table should connect. ** ** Enable imposter mode only when the schema has already been parsed. Then ** run a single CREATE TABLE statement to construct the imposter table in @@ -188419,6 +190657,13 @@ SQLITE_PRIVATE void sqlite3ConnectionClosed(sqlite3 *db){ #ifndef _FTSINT_H #define _FTSINT_H +/* +** Activate assert() only if SQLITE_TEST is enabled. +*/ +#if !defined(NDEBUG) && !defined(SQLITE_DEBUG) +# define NDEBUG 1 +#endif + /* #include */ /* #include */ /* #include */ @@ -188426,10 +190671,6 @@ SQLITE_PRIVATE void sqlite3ConnectionClosed(sqlite3 *db){ /* #include */ /* #include */ -#if !defined(NDEBUG) && !defined(SQLITE_DEBUG) -# define NDEBUG 1 -#endif - /* FTS3/FTS4 require virtual tables */ #ifdef SQLITE_OMIT_VIRTUALTABLE # undef SQLITE_ENABLE_FTS3 @@ -188872,13 +191113,6 @@ typedef sqlite3_int64 i64; /* 8-byte signed integer */ */ #define UNUSED_PARAMETER(x) (void)(x) -/* -** Activate assert() only if SQLITE_TEST is enabled. -*/ -#if !defined(NDEBUG) && !defined(SQLITE_DEBUG) -# define NDEBUG 1 -#endif - /* ** The TESTONLY macro is used to enclose variable declarations or ** other bits of code that are needed to support the arguments @@ -188899,7 +191133,7 @@ typedef sqlite3_int64 i64; /* 8-byte signed integer */ ** Macros needed to provide flexible arrays in a portable way */ #ifndef offsetof -# define offsetof(STRUCTURE,FIELD) ((size_t)((char*)&((STRUCTURE*)0)->FIELD)) +# define offsetof(ST,M) ((size_t)((char*)&((ST*)0)->M - (char*)0)) #endif #if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) # define FLEXARRAY @@ -203153,8 +205387,8 @@ struct NodeWriter { ** to an appendable b-tree segment. 
*/ struct IncrmergeWriter { - int nLeafEst; /* Space allocated for leaf blocks */ - int nWork; /* Number of leaf pages flushed */ + i64 nLeafEst; /* Space allocated for leaf blocks */ + i64 nWork; /* Number of leaf pages flushed */ sqlite3_int64 iAbsLevel; /* Absolute level of input segments */ int iIdx; /* Index of *output* segment in iAbsLevel+1 */ sqlite3_int64 iStart; /* Block number of first allocated block */ @@ -203900,7 +206134,7 @@ static int fts3IncrmergeWriter( ){ int rc; /* Return Code */ int i; /* Iterator variable */ - int nLeafEst = 0; /* Blocks allocated for leaf nodes */ + i64 nLeafEst = 0; /* Blocks allocated for leaf nodes */ sqlite3_stmt *pLeafEst = 0; /* SQL used to determine nLeafEst */ sqlite3_stmt *pFirstBlock = 0; /* SQL used to determine first block */ @@ -203910,7 +206144,7 @@ static int fts3IncrmergeWriter( sqlite3_bind_int64(pLeafEst, 1, iAbsLevel); sqlite3_bind_int64(pLeafEst, 2, pCsr->nSegment); if( SQLITE_ROW==sqlite3_step(pLeafEst) ){ - nLeafEst = sqlite3_column_int(pLeafEst, 0); + nLeafEst = sqlite3_column_int64(pLeafEst, 0); } rc = sqlite3_reset(pLeafEst); } @@ -205293,10 +207527,6 @@ SQLITE_PRIVATE int sqlite3Fts3Optimize(Fts3Table *p){ /* #include */ /* #include */ -#ifndef SQLITE_AMALGAMATION -typedef sqlite3_int64 i64; -#endif - /* ** Characters that may appear in the second argument to matchinfo(). */ @@ -210150,7 +212380,7 @@ static u32 jsonTranslateBlobToText( jsonAppendChar(pOut, '\''); break; case 'v': - jsonAppendRawNZ(pOut, "\\u0009", 6); + jsonAppendRawNZ(pOut, "\\u000b", 6); break; case 'x': if( sz2<4 ){ @@ -211000,19 +213230,27 @@ static void jsonReturnTextJsonFromBlob( ** ** If the value is a primitive, return it as an SQL value. ** If the value is an array or object, return it as either -** JSON text or the BLOB encoding, depending on the JSON_B flag -** on the userdata. 
+** JSON text or the BLOB encoding, depending on the eMode flag +** as follows: +** +** eMode==0 JSONB if the JSON_B flag is set in userdata or +** text if the JSON_B flag is omitted from userdata. +** +** eMode==1 Text +** +** eMode==2 JSONB */ static void jsonReturnFromBlob( JsonParse *pParse, /* Complete JSON parse tree */ u32 i, /* Index of the node */ sqlite3_context *pCtx, /* Return value for this function */ - int textOnly /* return text JSON. Disregard user-data */ + int eMode /* Format of return: text of JSONB */ ){ u32 n, sz; int rc; sqlite3 *db = sqlite3_context_db_handle(pCtx); + assert( eMode>=0 && eMode<=2 ); n = jsonbPayloadSize(pParse, i, &sz); if( n==0 ){ sqlite3_result_error(pCtx, "malformed JSON", -1); @@ -211053,7 +213291,19 @@ static void jsonReturnFromBlob( rc = sqlite3DecOrHexToI64(z, &iRes); sqlite3DbFree(db, z); if( rc==0 ){ - sqlite3_result_int64(pCtx, bNeg ? -iRes : iRes); + if( iRes<0 ){ + /* A hexadecimal literal with 16 significant digits and with the + ** high-order bit set is a negative integer in SQLite (and hence + ** iRes comes back as negative) but should be interpreted as a + ** positive value if it occurs within JSON. The value is too + ** large to appear as an SQLite integer so it must be converted + ** into floating point. */ + double r; + r = (double)*(sqlite3_uint64*)&iRes; + sqlite3_result_double(pCtx, bNeg ? -r : r); + }else{ + sqlite3_result_int64(pCtx, bNeg ? -iRes : iRes); + } }else if( rc==3 && bNeg ){ sqlite3_result_int64(pCtx, SMALLEST_INT64); }else if( rc==1 ){ @@ -211131,8 +213381,14 @@ static void jsonReturnFromBlob( } case JSONB_ARRAY: case JSONB_OBJECT: { - int flags = textOnly ? 
0 : SQLITE_PTR_TO_INT(sqlite3_user_data(pCtx)); - if( flags & JSON_BLOB ){ + if( eMode==0 ){ + if( (SQLITE_PTR_TO_INT(sqlite3_user_data(pCtx)) & JSON_BLOB)!=0 ){ + eMode = 2; + }else{ + eMode = 1; + } + } + if( eMode==2 ){ sqlite3_result_blob(pCtx, &pParse->aBlob[i], sz+n, SQLITE_TRANSIENT); }else{ jsonReturnTextJsonFromBlob(pCtx, &pParse->aBlob[i], sz+n); @@ -212779,6 +215035,7 @@ struct JsonEachCursor { u32 nRoot; /* Size of the root path in bytes */ u8 eType; /* Type of the container for element i */ u8 bRecursive; /* True for json_tree(). False for json_each() */ + u8 eMode; /* 1 for json_each(). 2 for jsonb_each() */ u32 nParent; /* Current nesting depth */ u32 nParentAlloc; /* Space allocated for aParent[] */ JsonParent *aParent; /* Parent elements of i */ @@ -212790,6 +215047,8 @@ typedef struct JsonEachConnection JsonEachConnection; struct JsonEachConnection { sqlite3_vtab base; /* Base class - must be first */ sqlite3 *db; /* Database connection */ + u8 eMode; /* 1 for json_each(). 2 for jsonb_each() */ + u8 bRecursive; /* True for json_tree(). False for json_each() */ }; @@ -212832,6 +215091,8 @@ static int jsonEachConnect( if( pNew==0 ) return SQLITE_NOMEM; sqlite3_vtab_config(db, SQLITE_VTAB_INNOCUOUS); pNew->db = db; + pNew->eMode = argv[0][4]=='b' ? 2 : 1; + pNew->bRecursive = argv[0][4+pNew->eMode]=='t'; } return rc; } @@ -212843,8 +215104,8 @@ static int jsonEachDisconnect(sqlite3_vtab *pVtab){ return SQLITE_OK; } -/* constructor for a JsonEachCursor object for json_each(). */ -static int jsonEachOpenEach(sqlite3_vtab *p, sqlite3_vtab_cursor **ppCursor){ +/* constructor for a JsonEachCursor object for json_each()/json_tree(). 
*/ +static int jsonEachOpen(sqlite3_vtab *p, sqlite3_vtab_cursor **ppCursor){ JsonEachConnection *pVtab = (JsonEachConnection*)p; JsonEachCursor *pCur; @@ -212852,21 +215113,13 @@ static int jsonEachOpenEach(sqlite3_vtab *p, sqlite3_vtab_cursor **ppCursor){ pCur = sqlite3DbMallocZero(pVtab->db, sizeof(*pCur)); if( pCur==0 ) return SQLITE_NOMEM; pCur->db = pVtab->db; + pCur->eMode = pVtab->eMode; + pCur->bRecursive = pVtab->bRecursive; jsonStringZero(&pCur->path); *ppCursor = &pCur->base; return SQLITE_OK; } -/* constructor for a JsonEachCursor object for json_tree(). */ -static int jsonEachOpenTree(sqlite3_vtab *p, sqlite3_vtab_cursor **ppCursor){ - int rc = jsonEachOpenEach(p, ppCursor); - if( rc==SQLITE_OK ){ - JsonEachCursor *pCur = (JsonEachCursor*)*ppCursor; - pCur->bRecursive = 1; - } - return rc; -} - /* Reset a JsonEachCursor back to its original state. Free any memory ** held. */ static void jsonEachCursorReset(JsonEachCursor *p){ @@ -213071,7 +215324,7 @@ static int jsonEachColumn( } case JEACH_VALUE: { u32 i = jsonSkipLabel(p); - jsonReturnFromBlob(&p->sParse, i, ctx, 1); + jsonReturnFromBlob(&p->sParse, i, ctx, p->eMode); if( (p->sParse.aBlob[i] & 0x0f)>=JSONB_ARRAY ){ sqlite3_result_subtype(ctx, JSON_SUBTYPE); } @@ -213315,36 +215568,7 @@ static sqlite3_module jsonEachModule = { jsonEachBestIndex, /* xBestIndex */ jsonEachDisconnect, /* xDisconnect */ 0, /* xDestroy */ - jsonEachOpenEach, /* xOpen - open a cursor */ - jsonEachClose, /* xClose - close a cursor */ - jsonEachFilter, /* xFilter - configure scan constraints */ - jsonEachNext, /* xNext - advance a cursor */ - jsonEachEof, /* xEof - check for end of scan */ - jsonEachColumn, /* xColumn - read data */ - jsonEachRowid, /* xRowid - read data */ - 0, /* xUpdate */ - 0, /* xBegin */ - 0, /* xSync */ - 0, /* xCommit */ - 0, /* xRollback */ - 0, /* xFindMethod */ - 0, /* xRename */ - 0, /* xSavepoint */ - 0, /* xRelease */ - 0, /* xRollbackTo */ - 0, /* xShadowName */ - 0 /* xIntegrity */ -}; - -/* 
The methods of the json_tree virtual table. */ -static sqlite3_module jsonTreeModule = { - 0, /* iVersion */ - 0, /* xCreate */ - jsonEachConnect, /* xConnect */ - jsonEachBestIndex, /* xBestIndex */ - jsonEachDisconnect, /* xDisconnect */ - 0, /* xDestroy */ - jsonEachOpenTree, /* xOpen - open a cursor */ + jsonEachOpen, /* xOpen - open a cursor */ jsonEachClose, /* xClose - close a cursor */ jsonEachFilter, /* xFilter - configure scan constraints */ jsonEachNext, /* xNext - advance a cursor */ @@ -213433,22 +215657,21 @@ SQLITE_PRIVATE void sqlite3RegisterJsonFunctions(void){ #if !defined(SQLITE_OMIT_VIRTUALTABLE) && !defined(SQLITE_OMIT_JSON) /* -** Register the JSON table-valued functions +** Register the JSON table-valued function named zName and return a +** pointer to its Module object. Return NULL if something goes wrong. */ -SQLITE_PRIVATE int sqlite3JsonTableFunctions(sqlite3 *db){ - int rc = SQLITE_OK; - static const struct { - const char *zName; - sqlite3_module *pModule; - } aMod[] = { - { "json_each", &jsonEachModule }, - { "json_tree", &jsonTreeModule }, - }; +SQLITE_PRIVATE Module *sqlite3JsonVtabRegister(sqlite3 *db, const char *zName){ unsigned int i; - for(i=0; iaModule, zName)==0 ); + for(i=0; i */ @@ -213553,7 +215776,7 @@ typedef unsigned int u32; # define NEVER(X) (X) #endif #ifndef offsetof -#define offsetof(STRUCTURE,FIELD) ((size_t)((char*)&((STRUCTURE*)0)->FIELD)) +# define offsetof(ST,M) ((size_t)((char*)&((ST*)0)->M - (char*)0)) #endif #if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) # define FLEXARRAY @@ -214591,6 +216814,12 @@ static void resetCursor(RtreeCursor *pCsr){ pCsr->base.pVtab = (sqlite3_vtab*)pRtree; pCsr->pReadAux = pStmt; + /* The following will only fail if the previous sqlite3_step() call failed, + ** in which case the error has already been caught. This statement never + ** encounters an error within an sqlite3_column_xxx() function, as it + ** calls sqlite3_column_value(), which does not use malloc(). 
So it is safe + ** to ignore the error code here. */ + sqlite3_reset(pStmt); } /* @@ -227679,8 +229908,8 @@ typedef struct DbpageCursor DbpageCursor; struct DbpageCursor { sqlite3_vtab_cursor base; /* Base class. Must be first */ - int pgno; /* Current page number */ - int mxPgno; /* Last page to visit on this scan */ + Pgno pgno; /* Current page number */ + Pgno mxPgno; /* Last page to visit on this scan */ Pager *pPager; /* Pager being read/written */ DbPage *pPage1; /* Page 1 of the database */ int iDb; /* Index of database to analyze */ @@ -227817,7 +230046,7 @@ static int dbpageOpen(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCursor){ }else{ memset(pCsr, 0, sizeof(DbpageCursor)); pCsr->base.pVtab = pVTab; - pCsr->pgno = -1; + pCsr->pgno = 0; } *ppCursor = (sqlite3_vtab_cursor *)pCsr; @@ -227870,7 +230099,8 @@ static int dbpageFilter( sqlite3 *db = pTab->db; Btree *pBt; - (void)idxStr; + UNUSED_PARAMETER(idxStr); + UNUSED_PARAMETER(argc); /* Default setting is no rows of result */ pCsr->pgno = 1; @@ -227916,12 +230146,12 @@ static int dbpageColumn( int rc = SQLITE_OK; switch( i ){ case 0: { /* pgno */ - sqlite3_result_int(ctx, pCsr->pgno); + sqlite3_result_int64(ctx, (sqlite3_int64)pCsr->pgno); break; } case 1: { /* data */ DbPage *pDbPage = 0; - if( pCsr->pgno==((PENDING_BYTE/pCsr->szPage)+1) ){ + if( pCsr->pgno==(Pgno)((PENDING_BYTE/pCsr->szPage)+1) ){ /* The pending byte page. Assume it is zeroed out. Attempting to ** request this page from the page is an SQLITE_CORRUPT error. 
*/ sqlite3_result_zeroblob(ctx, pCsr->szPage); @@ -227995,10 +230225,10 @@ static int dbpageUpdate( goto update_fail; } if( sqlite3_value_type(argv[0])==SQLITE_NULL ){ - pgno = (Pgno)sqlite3_value_int(argv[2]); + pgno = (Pgno)sqlite3_value_int64(argv[2]); isInsert = 1; }else{ - pgno = sqlite3_value_int(argv[0]); + pgno = (Pgno)sqlite3_value_int64(argv[0]); if( (Pgno)sqlite3_value_int(argv[1])!=pgno ){ zErr = "cannot insert"; goto update_fail; @@ -228050,7 +230280,8 @@ static int dbpageUpdate( memcpy(aPage, pData, szPage); pTab->pgnoTrunc = 0; } - }else{ + } + if( rc!=SQLITE_OK ){ pTab->pgnoTrunc = 0; } sqlite3PagerUnref(pDbPage); @@ -228133,6 +230364,536 @@ SQLITE_PRIVATE int sqlite3DbpageRegister(sqlite3 *db){ return SQLITE_OK; } #endif /* SQLITE_ENABLE_DBSTAT_VTAB */ /************** End of dbpage.c **********************************************/ +/************** Begin file carray.c ******************************************/ +/* +** 2016-06-29 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** +** This file implements a table-valued-function that +** returns the values in a C-language array. +** Examples: +** +** SELECT * FROM carray($ptr,5) +** +** The query above returns 5 integers contained in a C-language array +** at the address $ptr. $ptr is a pointer to the array of integers. +** The pointer value must be assigned to $ptr using the +** sqlite3_bind_pointer() interface with a pointer type of "carray". 
+** For example: +** +** static int aX[] = { 53, 9, 17, 2231, 4, 99 }; +** int i = sqlite3_bind_parameter_index(pStmt, "$ptr"); +** sqlite3_bind_pointer(pStmt, i, aX, "carray", 0); +** +** There is an optional third parameter to determine the datatype of +** the C-language array. Allowed values of the third parameter are +** 'int32', 'int64', 'double', 'char*', 'struct iovec'. Example: +** +** SELECT * FROM carray($ptr,10,'char*'); +** +** The default value of the third parameter is 'int32'. +** +** HOW IT WORKS +** +** The carray "function" is really a virtual table with the +** following schema: +** +** CREATE TABLE carray( +** value, +** pointer HIDDEN, +** count HIDDEN, +** ctype TEXT HIDDEN +** ); +** +** If the hidden columns "pointer" and "count" are unconstrained, then +** the virtual table has no rows. Otherwise, the virtual table interprets +** the integer value of "pointer" as a pointer to the array and "count" +** as the number of elements in the array. The virtual table steps through +** the array, element by element. 
+*/ +#if !defined(SQLITE_OMIT_VIRTUALTABLE) && defined(SQLITE_ENABLE_CARRAY) +/* #include "sqliteInt.h" */ +#if defined(_WIN32) || defined(__RTP__) || defined(_WRS_KERNEL) + struct iovec { + void *iov_base; + size_t iov_len; + }; +#else +# include +#endif + +/* +** Names of allowed datatypes +*/ +static const char *azCarrayType[] = { + "int32", "int64", "double", "char*", "struct iovec" +}; + +/* +** Structure used to hold the sqlite3_carray_bind() information +*/ +typedef struct carray_bind carray_bind; +struct carray_bind { + void *aData; /* The data */ + int nData; /* Number of elements */ + int mFlags; /* Control flags */ + void (*xDel)(void*); /* Destructor for aData */ +}; + + +/* carray_cursor is a subclass of sqlite3_vtab_cursor which will +** serve as the underlying representation of a cursor that scans +** over rows of the result +*/ +typedef struct carray_cursor carray_cursor; +struct carray_cursor { + sqlite3_vtab_cursor base; /* Base class - must be first */ + sqlite3_int64 iRowid; /* The rowid */ + void *pPtr; /* Pointer to the array of values */ + sqlite3_int64 iCnt; /* Number of integers in the array */ + unsigned char eType; /* One of the CARRAY_type values */ +}; + +/* +** The carrayConnect() method is invoked to create a new +** carray_vtab that describes the carray virtual table. +** +** Think of this routine as the constructor for carray_vtab objects. +** +** All this routine needs to do is: +** +** (1) Allocate the carray_vtab object and initialize all fields. +** +** (2) Tell SQLite (via the sqlite3_declare_vtab() interface) what the +** result set of queries against carray will look like. 
+*/ +static int carrayConnect( + sqlite3 *db, + void *pAux, + int argc, const char *const*argv, + sqlite3_vtab **ppVtab, + char **pzErr +){ + sqlite3_vtab *pNew; + int rc; + +/* Column numbers */ +#define CARRAY_COLUMN_VALUE 0 +#define CARRAY_COLUMN_POINTER 1 +#define CARRAY_COLUMN_COUNT 2 +#define CARRAY_COLUMN_CTYPE 3 + + rc = sqlite3_declare_vtab(db, + "CREATE TABLE x(value,pointer hidden,count hidden,ctype hidden)"); + if( rc==SQLITE_OK ){ + pNew = *ppVtab = sqlite3_malloc( sizeof(*pNew) ); + if( pNew==0 ) return SQLITE_NOMEM; + memset(pNew, 0, sizeof(*pNew)); + } + return rc; +} + +/* +** This method is the destructor for carray_cursor objects. +*/ +static int carrayDisconnect(sqlite3_vtab *pVtab){ + sqlite3_free(pVtab); + return SQLITE_OK; +} + +/* +** Constructor for a new carray_cursor object. +*/ +static int carrayOpen(sqlite3_vtab *p, sqlite3_vtab_cursor **ppCursor){ + carray_cursor *pCur; + pCur = sqlite3_malloc( sizeof(*pCur) ); + if( pCur==0 ) return SQLITE_NOMEM; + memset(pCur, 0, sizeof(*pCur)); + *ppCursor = &pCur->base; + return SQLITE_OK; +} + +/* +** Destructor for a carray_cursor. +*/ +static int carrayClose(sqlite3_vtab_cursor *cur){ + sqlite3_free(cur); + return SQLITE_OK; +} + + +/* +** Advance a carray_cursor to its next row of output. +*/ +static int carrayNext(sqlite3_vtab_cursor *cur){ + carray_cursor *pCur = (carray_cursor*)cur; + pCur->iRowid++; + return SQLITE_OK; +} + +/* +** Return values of columns for the row at which the carray_cursor +** is currently pointing. 
+*/ +static int carrayColumn( + sqlite3_vtab_cursor *cur, /* The cursor */ + sqlite3_context *ctx, /* First argument to sqlite3_result_...() */ + int i /* Which column to return */ +){ + carray_cursor *pCur = (carray_cursor*)cur; + sqlite3_int64 x = 0; + switch( i ){ + case CARRAY_COLUMN_POINTER: return SQLITE_OK; + case CARRAY_COLUMN_COUNT: x = pCur->iCnt; break; + case CARRAY_COLUMN_CTYPE: { + sqlite3_result_text(ctx, azCarrayType[pCur->eType], -1, SQLITE_STATIC); + return SQLITE_OK; + } + default: { + switch( pCur->eType ){ + case CARRAY_INT32: { + int *p = (int*)pCur->pPtr; + sqlite3_result_int(ctx, p[pCur->iRowid-1]); + return SQLITE_OK; + } + case CARRAY_INT64: { + sqlite3_int64 *p = (sqlite3_int64*)pCur->pPtr; + sqlite3_result_int64(ctx, p[pCur->iRowid-1]); + return SQLITE_OK; + } + case CARRAY_DOUBLE: { + double *p = (double*)pCur->pPtr; + sqlite3_result_double(ctx, p[pCur->iRowid-1]); + return SQLITE_OK; + } + case CARRAY_TEXT: { + const char **p = (const char**)pCur->pPtr; + sqlite3_result_text(ctx, p[pCur->iRowid-1], -1, SQLITE_TRANSIENT); + return SQLITE_OK; + } + default: { + const struct iovec *p = (struct iovec*)pCur->pPtr; + assert( pCur->eType==CARRAY_BLOB ); + sqlite3_result_blob(ctx, p[pCur->iRowid-1].iov_base, + (int)p[pCur->iRowid-1].iov_len, SQLITE_TRANSIENT); + return SQLITE_OK; + } + } + } + } + sqlite3_result_int64(ctx, x); + return SQLITE_OK; +} + +/* +** Return the rowid for the current row. In this implementation, the +** rowid is the same as the output value. +*/ +static int carrayRowid(sqlite3_vtab_cursor *cur, sqlite_int64 *pRowid){ + carray_cursor *pCur = (carray_cursor*)cur; + *pRowid = pCur->iRowid; + return SQLITE_OK; +} + +/* +** Return TRUE if the cursor has been moved off of the last +** row of output. 
+*/ +static int carrayEof(sqlite3_vtab_cursor *cur){ + carray_cursor *pCur = (carray_cursor*)cur; + return pCur->iRowid>pCur->iCnt; +} + +/* +** This method is called to "rewind" the carray_cursor object back +** to the first row of output. +*/ +static int carrayFilter( + sqlite3_vtab_cursor *pVtabCursor, + int idxNum, const char *idxStr, + int argc, sqlite3_value **argv +){ + carray_cursor *pCur = (carray_cursor *)pVtabCursor; + pCur->pPtr = 0; + pCur->iCnt = 0; + switch( idxNum ){ + case 1: { + carray_bind *pBind = sqlite3_value_pointer(argv[0], "carray-bind"); + if( pBind==0 ) break; + pCur->pPtr = pBind->aData; + pCur->iCnt = pBind->nData; + pCur->eType = pBind->mFlags & 0x07; + break; + } + case 2: + case 3: { + pCur->pPtr = sqlite3_value_pointer(argv[0], "carray"); + pCur->iCnt = pCur->pPtr ? sqlite3_value_int64(argv[1]) : 0; + if( idxNum<3 ){ + pCur->eType = CARRAY_INT32; + }else{ + unsigned char i; + const char *zType = (const char*)sqlite3_value_text(argv[2]); + for(i=0; i=sizeof(azCarrayType)/sizeof(azCarrayType[0]) ){ + pVtabCursor->pVtab->zErrMsg = sqlite3_mprintf( + "unknown datatype: %Q", zType); + return SQLITE_ERROR; + }else{ + pCur->eType = i; + } + } + break; + } + } + pCur->iRowid = 1; + return SQLITE_OK; +} + +/* +** SQLite will invoke this method one or more times while planning a query +** that uses the carray virtual table. This routine needs to create +** a query plan for each invocation and compute an estimated cost for that +** plan. +** +** In this implementation idxNum is used to represent the +** query plan. idxStr is unused. +** +** idxNum is: +** +** 1 If only the pointer= constraint exists. In this case, the +** parameter must be bound using sqlite3_carray_bind(). +** +** 2 if the pointer= and count= constraints exist. +** +** 3 if the ctype= constraint also exists. +** +** idxNum is 0 otherwise and carray becomes an empty table. 
+*/ +static int carrayBestIndex( + sqlite3_vtab *tab, + sqlite3_index_info *pIdxInfo +){ + int i; /* Loop over constraints */ + int ptrIdx = -1; /* Index of the pointer= constraint, or -1 if none */ + int cntIdx = -1; /* Index of the count= constraint, or -1 if none */ + int ctypeIdx = -1; /* Index of the ctype= constraint, or -1 if none */ + unsigned seen = 0; /* Bitmask of == constrainted columns */ + + const struct sqlite3_index_constraint *pConstraint; + pConstraint = pIdxInfo->aConstraint; + for(i=0; inConstraint; i++, pConstraint++){ + if( pConstraint->op!=SQLITE_INDEX_CONSTRAINT_EQ ) continue; + if( pConstraint->iColumn>=0 ) seen |= 1 << pConstraint->iColumn; + if( pConstraint->usable==0 ) continue; + switch( pConstraint->iColumn ){ + case CARRAY_COLUMN_POINTER: + ptrIdx = i; + break; + case CARRAY_COLUMN_COUNT: + cntIdx = i; + break; + case CARRAY_COLUMN_CTYPE: + ctypeIdx = i; + break; + } + } + if( ptrIdx>=0 ){ + pIdxInfo->aConstraintUsage[ptrIdx].argvIndex = 1; + pIdxInfo->aConstraintUsage[ptrIdx].omit = 1; + pIdxInfo->estimatedCost = (double)1; + pIdxInfo->estimatedRows = 100; + pIdxInfo->idxNum = 1; + if( cntIdx>=0 ){ + pIdxInfo->aConstraintUsage[cntIdx].argvIndex = 2; + pIdxInfo->aConstraintUsage[cntIdx].omit = 1; + pIdxInfo->idxNum = 2; + if( ctypeIdx>=0 ){ + pIdxInfo->aConstraintUsage[ctypeIdx].argvIndex = 3; + pIdxInfo->aConstraintUsage[ctypeIdx].omit = 1; + pIdxInfo->idxNum = 3; + }else if( seen & (1<estimatedCost = (double)2147483647; + pIdxInfo->estimatedRows = 2147483647; + pIdxInfo->idxNum = 0; + } + return SQLITE_OK; +} + +/* +** This following structure defines all the methods for the +** carray virtual table. 
+*/ +static sqlite3_module carrayModule = { + 0, /* iVersion */ + 0, /* xCreate */ + carrayConnect, /* xConnect */ + carrayBestIndex, /* xBestIndex */ + carrayDisconnect, /* xDisconnect */ + 0, /* xDestroy */ + carrayOpen, /* xOpen - open a cursor */ + carrayClose, /* xClose - close a cursor */ + carrayFilter, /* xFilter - configure scan constraints */ + carrayNext, /* xNext - advance a cursor */ + carrayEof, /* xEof - check for end of scan */ + carrayColumn, /* xColumn - read data */ + carrayRowid, /* xRowid - read data */ + 0, /* xUpdate */ + 0, /* xBegin */ + 0, /* xSync */ + 0, /* xCommit */ + 0, /* xRollback */ + 0, /* xFindMethod */ + 0, /* xRename */ + 0, /* xSavepoint */ + 0, /* xRelease */ + 0, /* xRollbackTo */ + 0, /* xShadow */ + 0 /* xIntegrity */ +}; + +/* +** Destructor for the carray_bind object +*/ +static void carrayBindDel(void *pPtr){ + carray_bind *p = (carray_bind*)pPtr; + if( p->xDel!=SQLITE_STATIC ){ + p->xDel(p->aData); + } + sqlite3_free(p); +} + +/* +** Invoke this interface in order to bind to the single-argument +** version of CARRAY(). +*/ +SQLITE_API int sqlite3_carray_bind( + sqlite3_stmt *pStmt, + int idx, + void *aData, + int nData, + int mFlags, + void (*xDestroy)(void*) +){ + carray_bind *pNew = 0; + int i; + int rc = SQLITE_OK; + + /* Ensure that the mFlags value is acceptable. 
*/ + assert( CARRAY_INT32==0 && CARRAY_INT64==1 && CARRAY_DOUBLE==2 ); + assert( CARRAY_TEXT==3 && CARRAY_BLOB==4 ); + if( mFlagsCARRAY_BLOB ){ + rc = SQLITE_ERROR; + goto carray_bind_error; + } + + pNew = sqlite3_malloc64(sizeof(*pNew)); + if( pNew==0 ){ + rc = SQLITE_NOMEM; + goto carray_bind_error; + } + + pNew->nData = nData; + pNew->mFlags = mFlags; + if( xDestroy==SQLITE_TRANSIENT ){ + sqlite3_int64 sz = nData; + switch( mFlags ){ + case CARRAY_INT32: sz *= 4; break; + case CARRAY_INT64: sz *= 8; break; + case CARRAY_DOUBLE: sz *= 8; break; + case CARRAY_TEXT: sz *= sizeof(char*); break; + default: sz *= sizeof(struct iovec); break; + } + if( mFlags==CARRAY_TEXT ){ + for(i=0; iaData = sqlite3_malloc64( sz ); + if( pNew->aData==0 ){ + rc = SQLITE_NOMEM; + goto carray_bind_error; + } + + if( mFlags==CARRAY_TEXT ){ + char **az = (char**)pNew->aData; + char *z = (char*)&az[nData]; + for(i=0; iaData; + unsigned char *z = (unsigned char*)&p[nData]; + for(i=0; iaData, aData, sz); + } + pNew->xDel = sqlite3_free; + }else{ + pNew->aData = aData; + pNew->xDel = xDestroy; + } + return sqlite3_bind_pointer(pStmt, idx, pNew, "carray-bind", carrayBindDel); + + carray_bind_error: + if( xDestroy!=SQLITE_STATIC && xDestroy!=SQLITE_TRANSIENT ){ + xDestroy(aData); + } + sqlite3_free(pNew); + return rc; +} + +/* +** Invoke this routine to register the carray() function. 
+*/ +SQLITE_PRIVATE Module *sqlite3CarrayRegister(sqlite3 *db){ + return sqlite3VtabCreateModule(db, "carray", &carrayModule, 0, 0); +} + +#endif /* !defined(SQLITE_OMIT_VIRTUALTABLE) && defined(SQLITE_ENABLE_CARRAY) */ + +/************** End of carray.c **********************************************/ /************** Begin file sqlite3session.c **********************************/ #if defined(SQLITE_ENABLE_SESSION) && defined(SQLITE_ENABLE_PREUPDATE_HOOK) @@ -230951,6 +233712,19 @@ static int sessionAppendDelete( return rc; } +static int sessionPrepare( + sqlite3 *db, + sqlite3_stmt **pp, + char **pzErrmsg, + const char *zSql +){ + int rc = sqlite3_prepare_v2(db, zSql, -1, pp, 0); + if( pzErrmsg && rc!=SQLITE_OK ){ + *pzErrmsg = sqlite3_mprintf("%s", sqlite3_errmsg(db)); + } + return rc; +} + /* ** Formulate and prepare a SELECT statement to retrieve a row from table ** zTab in database zDb based on its primary key. i.e. @@ -230972,12 +233746,12 @@ static int sessionSelectStmt( int nCol, /* Number of columns in table */ const char **azCol, /* Names of table columns */ u8 *abPK, /* PRIMARY KEY array */ - sqlite3_stmt **ppStmt /* OUT: Prepared SELECT statement */ + sqlite3_stmt **ppStmt, /* OUT: Prepared SELECT statement */ + char **pzErrmsg /* OUT: Error message */ ){ int rc = SQLITE_OK; char *zSql = 0; const char *zSep = ""; - int nSql = -1; int i; SessionBuffer cols = {0, 0, 0}; @@ -231057,7 +233831,7 @@ static int sessionSelectStmt( #endif if( rc==SQLITE_OK ){ - rc = sqlite3_prepare_v2(db, zSql, nSql, ppStmt, 0); + rc = sessionPrepare(db, ppStmt, pzErrmsg, zSql); } sqlite3_free(zSql); sqlite3_free(nooptest.aBuf); @@ -231221,7 +233995,7 @@ static int sessionGenerateChangeset( /* Build and compile a statement to execute: */ if( rc==SQLITE_OK ){ rc = sessionSelectStmt(db, 0, pSession->zDb, - zName, pTab->bRowid, pTab->nCol, pTab->azCol, pTab->abPK, &pSel + zName, pTab->bRowid, pTab->nCol, pTab->azCol, pTab->abPK, &pSel, 0 ); } @@ -232430,6 +235204,7 @@ struct 
SessionApplyCtx { u8 bRebase; /* True to collect rebase information */ u8 bIgnoreNoop; /* True to ignore no-op conflicts */ int bRowid; + char *zErr; /* Error message, if any */ }; /* Number of prepared UPDATE statements to cache. */ @@ -232655,7 +235430,7 @@ static int sessionDeleteRow( } if( rc==SQLITE_OK ){ - rc = sqlite3_prepare_v2(db, (char *)buf.aBuf, buf.nBuf, &p->pDelete, 0); + rc = sessionPrepare(db, &p->pDelete, &p->zErr, (char*)buf.aBuf); } sqlite3_free(buf.aBuf); @@ -232682,7 +235457,7 @@ static int sessionSelectRow( ){ /* TODO */ return sessionSelectStmt(db, p->bIgnoreNoop, - "main", zTab, p->bRowid, p->nCol, p->azCol, p->abPK, &p->pSelect + "main", zTab, p->bRowid, p->nCol, p->azCol, p->abPK, &p->pSelect, &p->zErr ); } @@ -232719,16 +235494,12 @@ static int sessionInsertRow( sessionAppendStr(&buf, ")", &rc); if( rc==SQLITE_OK ){ - rc = sqlite3_prepare_v2(db, (char *)buf.aBuf, buf.nBuf, &p->pInsert, 0); + rc = sessionPrepare(db, &p->pInsert, &p->zErr, (char*)buf.aBuf); } sqlite3_free(buf.aBuf); return rc; } -static int sessionPrepare(sqlite3 *db, sqlite3_stmt **pp, const char *zSql){ - return sqlite3_prepare_v2(db, zSql, -1, pp, 0); -} - /* ** Prepare statements for applying changes to the sqlite_stat1 table. 
** These are similar to those created by sessionSelectRow(), @@ -232738,14 +235509,14 @@ static int sessionPrepare(sqlite3 *db, sqlite3_stmt **pp, const char *zSql){ static int sessionStat1Sql(sqlite3 *db, SessionApplyCtx *p){ int rc = sessionSelectRow(db, "sqlite_stat1", p); if( rc==SQLITE_OK ){ - rc = sessionPrepare(db, &p->pInsert, + rc = sessionPrepare(db, &p->pInsert, 0, "INSERT INTO main.sqlite_stat1 VALUES(?1, " "CASE WHEN length(?2)=0 AND typeof(?2)='blob' THEN NULL ELSE ?2 END, " "?3)" ); } if( rc==SQLITE_OK ){ - rc = sessionPrepare(db, &p->pDelete, + rc = sessionPrepare(db, &p->pDelete, 0, "DELETE FROM main.sqlite_stat1 WHERE tbl=?1 AND idx IS " "CASE WHEN length(?2)=0 AND typeof(?2)='blob' THEN NULL ELSE ?2 END " "AND (?4 OR stat IS ?3)" @@ -232969,7 +235740,7 @@ static int sessionConflictHandler( void *pCtx, /* First argument for conflict handler */ int *pbReplace /* OUT: Set to true if PK row is found */ ){ - int res = 0; /* Value returned by conflict handler */ + int res = SQLITE_CHANGESET_OMIT;/* Value returned by conflict handler */ int rc; int nCol; int op; @@ -232990,11 +235761,9 @@ static int sessionConflictHandler( if( rc==SQLITE_ROW ){ /* There exists another row with the new.* primary key. */ - if( p->bIgnoreNoop - && sqlite3_column_int(p->pSelect, sqlite3_column_count(p->pSelect)-1) + if( 0==p->bIgnoreNoop + || 0==sqlite3_column_int(p->pSelect, sqlite3_column_count(p->pSelect)-1) ){ - res = SQLITE_CHANGESET_OMIT; - }else{ pIter->pConflict = p->pSelect; res = xConflict(pCtx, eType, pIter); pIter->pConflict = 0; @@ -233008,7 +235777,9 @@ static int sessionConflictHandler( int nBlob = pIter->in.iNext - pIter->in.iCurrent; sessionAppendBlob(&p->constraints, aBlob, nBlob, &rc); return SQLITE_OK; - }else{ + }else if( p->bIgnoreNoop==0 || op!=SQLITE_DELETE + || eType==SQLITE_CHANGESET_CONFLICT + ){ /* No other row with the new.* primary key. 
*/ res = xConflict(pCtx, eType+1, pIter); if( res==SQLITE_CHANGESET_REPLACE ) rc = SQLITE_MISUSE; @@ -233106,7 +235877,7 @@ static int sessionApplyOneOp( sqlite3_step(p->pDelete); rc = sqlite3_reset(p->pDelete); - if( rc==SQLITE_OK && sqlite3_changes(p->db)==0 && p->bIgnoreNoop==0 ){ + if( rc==SQLITE_OK && sqlite3_changes(p->db)==0 ){ rc = sessionConflictHandler( SQLITE_CHANGESET_DATA, p, pIter, xConflict, pCtx, pbRetry ); @@ -233318,6 +236089,10 @@ static int sessionChangesetApply( void *pCtx, /* Copy of sixth arg to _apply() */ const char *zTab /* Table name */ ), + int(*xFilterIter)( + void *pCtx, /* Copy of sixth arg to _apply() */ + sqlite3_changeset_iter *p + ), int(*xConflict)( void *pCtx, /* Copy of fifth arg to _apply() */ int eConflict, /* DATA, MISSING, CONFLICT, CONSTRAINT */ @@ -233458,6 +236233,9 @@ static int sessionChangesetApply( ** next change. A log message has already been issued. */ if( schemaMismatch ) continue; + /* If this is a call to apply_v3(), invoke xFilterIter here. 
*/ + if( xFilterIter && 0==xFilterIter(pCtx, pIter) ) continue; + rc = sessionApplyOneWithRetry(db, pIter, &sApply, xConflict, pCtx); } @@ -233504,6 +236282,7 @@ static int sessionChangesetApply( assert( sApply.bRebase || sApply.rebase.nBuf==0 ); if( rc==SQLITE_OK && bPatchset==0 && sApply.bRebase ){ + assert( ppRebase!=0 && pnRebase!=0 ); *ppRebase = (void*)sApply.rebase.aBuf; *pnRebase = sApply.rebase.nBuf; sApply.rebase.aBuf = 0; @@ -233521,10 +236300,96 @@ static int sessionChangesetApply( db->flags &= ~((u64)SQLITE_FkNoAction); db->aDb[0].pSchema->schema_cookie -= 32; } + + assert( rc!=SQLITE_OK || sApply.zErr==0 ); + sqlite3_set_errmsg(db, rc, sApply.zErr); + sqlite3_free(sApply.zErr); + sqlite3_mutex_leave(sqlite3_db_mutex(db)); return rc; } +/* +** This function is called by all six sqlite3changeset_apply() variants: +** +** + sqlite3changeset_apply() +** + sqlite3changeset_apply_v2() +** + sqlite3changeset_apply_v3() +** + sqlite3changeset_apply_strm() +** + sqlite3changeset_apply_strm_v2() +** + sqlite3changeset_apply_strm_v3() +** +** Arguments passed to this function are as follows: +** +** db: +** Database handle to apply changeset to main database of. +** +** nChangeset/pChangeset: +** These are both passed zero for the streaming variants. For the normal +** apply() functions, these are passed the size of and the buffer containing +** the changeset, respectively. +** +** xInput/pIn: +** These are both passed zero for the normal variants. For the streaming +** apply() functions, these are passed the input callback and context +** pointer, respectively. +** +** xFilter: +** The filter function as passed to apply() or apply_v2() (to filter by +** table name), if any. This is always NULL for apply_v3() calls. +** +** xFilterIter: +** The filter function as passed to apply_v3(), if any. +** +** xConflict: +** The conflict handler callback (must not be NULL). +** +** pCtx: +** The context pointer passed to the xFilter and xConflict handler callbacks. 
+** +** ppRebase, pnRebase: +** Zero for apply(). The rebase changeset output pointers, if any, for +** apply_v2() and apply_v3(). +** +** flags: +** Zero for apply(). The flags parameter for apply_v2() and apply_v3(). +*/ +static int sessionChangesetApplyV23( + sqlite3 *db, /* Apply change to "main" db of this handle */ + int nChangeset, /* Size of changeset in bytes */ + void *pChangeset, /* Changeset blob */ + int (*xInput)(void *pIn, void *pData, int *pnData), /* Input function */ + void *pIn, /* First arg for xInput */ + int(*xFilter)( + void *pCtx, /* Copy of sixth arg to _apply() */ + const char *zTab /* Table name */ + ), + int(*xFilterIter)( + void *pCtx, /* Copy of sixth arg to _apply() */ + sqlite3_changeset_iter *p /* Handle describing current change */ + ), + int(*xConflict)( + void *pCtx, /* Copy of sixth arg to _apply() */ + int eConflict, /* DATA, MISSING, CONFLICT, CONSTRAINT */ + sqlite3_changeset_iter *p /* Handle describing change and conflict */ + ), + void *pCtx, /* First argument passed to xConflict */ + void **ppRebase, int *pnRebase, + int flags +){ + sqlite3_changeset_iter *pIter; /* Iterator to skip through changeset */ + int bInverse = !!(flags & SQLITE_CHANGESETAPPLY_INVERT); + int rc = sessionChangesetStart( + &pIter, xInput, pIn, nChangeset, pChangeset, bInverse, 1 + ); + if( rc==SQLITE_OK ){ + rc = sessionChangesetApply(db, pIter, + xFilter, xFilterIter, xConflict, pCtx, ppRebase, pnRebase, flags + ); + } + return rc; +} + /* ** Apply the changeset passed via pChangeset/nChangeset to the main ** database attached to handle "db". 
@@ -233546,17 +236411,39 @@ SQLITE_API int sqlite3changeset_apply_v2( void **ppRebase, int *pnRebase, int flags ){ - sqlite3_changeset_iter *pIter; /* Iterator to skip through changeset */ - int bInv = !!(flags & SQLITE_CHANGESETAPPLY_INVERT); - int rc = sessionChangesetStart(&pIter, 0, 0, nChangeset, pChangeset, bInv, 1); + return sessionChangesetApplyV23(db, + nChangeset, pChangeset, 0, 0, + xFilter, 0, xConflict, pCtx, + ppRebase, pnRebase, flags + ); +} - if( rc==SQLITE_OK ){ - rc = sessionChangesetApply( - db, pIter, xFilter, xConflict, pCtx, ppRebase, pnRebase, flags - ); - } - - return rc; +/* +** Apply the changeset passed via pChangeset/nChangeset to the main +** database attached to handle "db". +*/ +SQLITE_API int sqlite3changeset_apply_v3( + sqlite3 *db, /* Apply change to "main" db of this handle */ + int nChangeset, /* Size of changeset in bytes */ + void *pChangeset, /* Changeset blob */ + int(*xFilter)( + void *pCtx, /* Copy of sixth arg to _apply() */ + sqlite3_changeset_iter *p /* Handle describing current change */ + ), + int(*xConflict)( + void *pCtx, /* Copy of sixth arg to _apply() */ + int eConflict, /* DATA, MISSING, CONFLICT, CONSTRAINT */ + sqlite3_changeset_iter *p /* Handle describing change and conflict */ + ), + void *pCtx, /* First argument passed to xConflict */ + void **ppRebase, int *pnRebase, + int flags +){ + return sessionChangesetApplyV23(db, + nChangeset, pChangeset, 0, 0, + 0, xFilter, xConflict, pCtx, + ppRebase, pnRebase, flags + ); } /* @@ -233579,8 +236466,10 @@ SQLITE_API int sqlite3changeset_apply( ), void *pCtx /* First argument passed to xConflict */ ){ - return sqlite3changeset_apply_v2( - db, nChangeset, pChangeset, xFilter, xConflict, pCtx, 0, 0, 0 + return sessionChangesetApplyV23(db, + nChangeset, pChangeset, 0, 0, + xFilter, 0, xConflict, pCtx, + 0, 0, 0 ); } @@ -233589,6 +236478,29 @@ SQLITE_API int sqlite3changeset_apply( ** attached to handle "db". 
Invoke the supplied conflict handler callback ** to resolve any conflicts encountered while applying the change. */ +SQLITE_API int sqlite3changeset_apply_v3_strm( + sqlite3 *db, /* Apply change to "main" db of this handle */ + int (*xInput)(void *pIn, void *pData, int *pnData), /* Input function */ + void *pIn, /* First arg for xInput */ + int(*xFilter)( + void *pCtx, /* Copy of sixth arg to _apply() */ + sqlite3_changeset_iter *p + ), + int(*xConflict)( + void *pCtx, /* Copy of sixth arg to _apply() */ + int eConflict, /* DATA, MISSING, CONFLICT, CONSTRAINT */ + sqlite3_changeset_iter *p /* Handle describing change and conflict */ + ), + void *pCtx, /* First argument passed to xConflict */ + void **ppRebase, int *pnRebase, + int flags +){ + return sessionChangesetApplyV23(db, + 0, 0, xInput, pIn, + 0, xFilter, xConflict, pCtx, + ppRebase, pnRebase, flags + ); +} SQLITE_API int sqlite3changeset_apply_v2_strm( sqlite3 *db, /* Apply change to "main" db of this handle */ int (*xInput)(void *pIn, void *pData, int *pnData), /* Input function */ @@ -233606,15 +236518,11 @@ SQLITE_API int sqlite3changeset_apply_v2_strm( void **ppRebase, int *pnRebase, int flags ){ - sqlite3_changeset_iter *pIter; /* Iterator to skip through changeset */ - int bInverse = !!(flags & SQLITE_CHANGESETAPPLY_INVERT); - int rc = sessionChangesetStart(&pIter, xInput, pIn, 0, 0, bInverse, 1); - if( rc==SQLITE_OK ){ - rc = sessionChangesetApply( - db, pIter, xFilter, xConflict, pCtx, ppRebase, pnRebase, flags - ); - } - return rc; + return sessionChangesetApplyV23(db, + 0, 0, xInput, pIn, + xFilter, 0, xConflict, pCtx, + ppRebase, pnRebase, flags + ); } SQLITE_API int sqlite3changeset_apply_strm( sqlite3 *db, /* Apply change to "main" db of this handle */ @@ -233631,8 +236539,10 @@ SQLITE_API int sqlite3changeset_apply_strm( ), void *pCtx /* First argument passed to xConflict */ ){ - return sqlite3changeset_apply_v2_strm( - db, xInput, pIn, xFilter, xConflict, pCtx, 0, 0, 0 + return 
sessionChangesetApplyV23(db, + 0, 0, xInput, pIn, + xFilter, 0, xConflict, pCtx, + 0, 0, 0 ); } @@ -235604,27 +238514,20 @@ typedef sqlite3_uint64 u64; # define LARGEST_INT64 (0xffffffff|(((i64)0x7fffffff)<<32)) # define SMALLEST_INT64 (((i64)-1) - LARGEST_INT64) -/* The uptr type is an unsigned integer large enough to hold a pointer +/* +** This macro is used in a single assert() within fts5 to check that an +** allocation is aligned to an 8-byte boundary. But it is a complicated +** macro to get right for multiple platforms without generating warnings. +** So instead of reproducing the entire definition from sqliteInt.h, we +** just do without this assert() for the rare non-amalgamation builds. */ -#if defined(HAVE_STDINT_H) - typedef uintptr_t uptr; -#elif SQLITE_PTRSIZE==4 - typedef u32 uptr; -#else - typedef u64 uptr; -#endif - -#ifdef SQLITE_4_BYTE_ALIGNED_MALLOC -# define EIGHT_BYTE_ALIGNMENT(X) ((((uptr)(X) - (uptr)0)&3)==0) -#else -# define EIGHT_BYTE_ALIGNMENT(X) ((((uptr)(X) - (uptr)0)&7)==0) -#endif +#define EIGHT_BYTE_ALIGNMENT(x) 1 /* ** Macros needed to provide flexible arrays in a portable way */ #ifndef offsetof -# define offsetof(STRUCTURE,FIELD) ((size_t)((char*)&((STRUCTURE*)0)->FIELD)) +# define offsetof(ST,M) ((size_t)((char*)&((ST*)0)->M - (char*)0)) #endif #if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) # define FLEXARRAY @@ -236366,7 +239269,7 @@ static int sqlite3Fts5ExprPattern( ** i64 iRowid = sqlite3Fts5ExprRowid(pExpr); ** } */ -static int sqlite3Fts5ExprFirst(Fts5Expr*, Fts5Index *pIdx, i64 iMin, int bDesc); +static int sqlite3Fts5ExprFirst(Fts5Expr*, Fts5Index *pIdx, i64 iMin, i64, int bDesc); static int sqlite3Fts5ExprNext(Fts5Expr*, i64 iMax); static int sqlite3Fts5ExprEof(Fts5Expr*); static i64 sqlite3Fts5ExprRowid(Fts5Expr*); @@ -241935,7 +244838,13 @@ static int fts5ExprNodeFirst(Fts5Expr *pExpr, Fts5ExprNode *pNode){ ** Return SQLITE_OK if successful, or an SQLite error code otherwise. 
It ** is not considered an error if the query does not match any documents. */ -static int sqlite3Fts5ExprFirst(Fts5Expr *p, Fts5Index *pIdx, i64 iFirst, int bDesc){ +static int sqlite3Fts5ExprFirst( + Fts5Expr *p, + Fts5Index *pIdx, + i64 iFirst, + i64 iLast, + int bDesc +){ Fts5ExprNode *pRoot = p->pRoot; int rc; /* Return code */ @@ -241957,6 +244866,9 @@ static int sqlite3Fts5ExprFirst(Fts5Expr *p, Fts5Index *pIdx, i64 iFirst, int bD assert( pRoot->bEof==0 ); rc = fts5ExprNodeNext(p, pRoot, 0, 0); } + if( fts5RowidCmp(p, pRoot->iRowid, iLast)>0 ){ + pRoot->bEof = 1; + } return rc; } @@ -244809,6 +247721,36 @@ struct Fts5SegIter { u8 bDel; /* True if the delete flag is set */ }; +static int fts5IndexCorruptRowid(Fts5Index *pIdx, i64 iRowid){ + pIdx->rc = FTS5_CORRUPT; + sqlite3Fts5ConfigErrmsg(pIdx->pConfig, + "fts5: corruption found reading blob %lld from table \"%s\"", + iRowid, pIdx->pConfig->zName + ); + return SQLITE_CORRUPT_VTAB; +} +#define FTS5_CORRUPT_ROWID(pIdx, iRowid) fts5IndexCorruptRowid(pIdx, iRowid) + +static int fts5IndexCorruptIter(Fts5Index *pIdx, Fts5SegIter *pIter){ + pIdx->rc = FTS5_CORRUPT; + sqlite3Fts5ConfigErrmsg(pIdx->pConfig, + "fts5: corruption on page %d, segment %d, table \"%s\"", + pIter->iLeafPgno, pIter->pSeg->iSegid, pIdx->pConfig->zName + ); + return SQLITE_CORRUPT_VTAB; +} +#define FTS5_CORRUPT_ITER(pIdx, pIter) fts5IndexCorruptIter(pIdx, pIter) + +static int fts5IndexCorruptIdx(Fts5Index *pIdx){ + pIdx->rc = FTS5_CORRUPT; + sqlite3Fts5ConfigErrmsg(pIdx->pConfig, + "fts5: corruption in table \"%s\"", pIdx->pConfig->zName + ); + return SQLITE_CORRUPT_VTAB; +} +#define FTS5_CORRUPT_IDX(pIdx) fts5IndexCorruptIdx(pIdx) + + /* ** Array of tombstone pages. Reference counted. */ @@ -245098,13 +248040,13 @@ static Fts5Data *fts5DataRead(Fts5Index *p, i64 iRowid){ ** All the reasons those functions might return SQLITE_ERROR - missing ** table, missing row, non-blob/text in block column - indicate ** backing store corruption. 
*/ - if( rc==SQLITE_ERROR ) rc = FTS5_CORRUPT; + if( rc==SQLITE_ERROR ) rc = FTS5_CORRUPT_ROWID(p, iRowid); if( rc==SQLITE_OK ){ u8 *aOut = 0; /* Read blob data into this buffer */ - int nByte = sqlite3_blob_bytes(p->pReader); - int szData = (sizeof(Fts5Data) + 7) & ~7; - sqlite3_int64 nAlloc = szData + nByte + FTS5_DATA_PADDING; + i64 nByte = sqlite3_blob_bytes(p->pReader); + i64 szData = (sizeof(Fts5Data) + 7) & ~7; + i64 nAlloc = szData + nByte + FTS5_DATA_PADDING; pRet = (Fts5Data*)sqlite3_malloc64(nAlloc); if( pRet ){ pRet->nn = nByte; @@ -245148,7 +248090,7 @@ static Fts5Data *fts5LeafRead(Fts5Index *p, i64 iRowid){ Fts5Data *pRet = fts5DataRead(p, iRowid); if( pRet ){ if( pRet->nn<4 || pRet->szLeaf>pRet->nn ){ - p->rc = FTS5_CORRUPT; + FTS5_CORRUPT_ROWID(p, iRowid); fts5DataRelease(pRet); pRet = 0; } @@ -245507,8 +248449,14 @@ static Fts5Structure *fts5StructureReadUncached(Fts5Index *p){ /* TODO: Do we need this if the leaf-index is appended? Probably... */ memset(&pData->p[pData->nn], 0, FTS5_DATA_PADDING); p->rc = fts5StructureDecode(pData->p, pData->nn, &iCookie, &pRet); - if( p->rc==SQLITE_OK && (pConfig->pgsz==0 || pConfig->iCookie!=iCookie) ){ - p->rc = sqlite3Fts5ConfigLoad(pConfig, iCookie); + if( p->rc==SQLITE_OK ){ + if( (pConfig->pgsz==0 || pConfig->iCookie!=iCookie) ){ + p->rc = sqlite3Fts5ConfigLoad(pConfig, iCookie); + } + }else if( p->rc==SQLITE_CORRUPT_VTAB ){ + sqlite3Fts5ConfigErrmsg(p->pConfig, + "fts5: corrupt structure record for table \"%s\"", p->pConfig->zName + ); } fts5DataRelease(pData); if( p->rc!=SQLITE_OK ){ @@ -246131,7 +249079,7 @@ static void fts5SegIterLoadRowid(Fts5Index *p, Fts5SegIter *pIter){ while( iOff>=pIter->pLeaf->szLeaf ){ fts5SegIterNextPage(p, pIter); if( pIter->pLeaf==0 ){ - if( p->rc==SQLITE_OK ) p->rc = FTS5_CORRUPT; + if( p->rc==SQLITE_OK ) FTS5_CORRUPT_ITER(p, pIter); return; } iOff = 4; @@ -246163,7 +249111,7 @@ static void fts5SegIterLoadTerm(Fts5Index *p, Fts5SegIter *pIter, int nKeep){ iOff += 
fts5GetVarint32(&a[iOff], nNew); if( iOff+nNew>pIter->pLeaf->szLeaf || nKeep>pIter->term.n || nNew==0 ){ - p->rc = FTS5_CORRUPT; + FTS5_CORRUPT_ITER(p, pIter); return; } pIter->term.n = nKeep; @@ -246293,6 +249241,7 @@ static void fts5SegIterReverseInitPage(Fts5Index *p, Fts5SegIter *pIter){ while( 1 ){ u64 iDelta = 0; + if( i>=n ) break; if( eDetail==FTS5_DETAIL_NONE ){ /* todo */ if( i=pNew->szLeaf ){ - p->rc = FTS5_CORRUPT; + FTS5_CORRUPT_ITER(p, pIter); }else{ pIter->pLeaf = pNew; pIter->iLeafOffset = iRowidOff; @@ -246592,7 +249541,7 @@ static void fts5SegIterNext( } assert_nc( iOffszLeaf ); if( iOff>pLeaf->szLeaf ){ - p->rc = FTS5_CORRUPT; + FTS5_CORRUPT_ITER(p, pIter); return; } } @@ -246700,18 +249649,20 @@ static void fts5SegIterReverse(Fts5Index *p, Fts5SegIter *pIter){ fts5DataRelease(pIter->pLeaf); pIter->pLeaf = pLast; pIter->iLeafPgno = pgnoLast; - iOff = fts5LeafFirstRowidOff(pLast); - if( iOff>pLast->szLeaf ){ - p->rc = FTS5_CORRUPT; - return; - } - iOff += fts5GetVarint(&pLast->p[iOff], (u64*)&pIter->iRowid); - pIter->iLeafOffset = iOff; + if( p->rc==SQLITE_OK ){ + iOff = fts5LeafFirstRowidOff(pLast); + if( iOff>pLast->szLeaf ){ + FTS5_CORRUPT_ITER(p, pIter); + return; + } + iOff += fts5GetVarint(&pLast->p[iOff], (u64*)&pIter->iRowid); + pIter->iLeafOffset = iOff; - if( fts5LeafIsTermless(pLast) ){ - pIter->iEndofDoclist = pLast->nn+1; - }else{ - pIter->iEndofDoclist = fts5LeafFirstTermOff(pLast); + if( fts5LeafIsTermless(pLast) ){ + pIter->iEndofDoclist = pLast->nn+1; + }else{ + pIter->iEndofDoclist = fts5LeafFirstTermOff(pLast); + } } } @@ -246781,7 +249732,7 @@ static void fts5LeafSeek( iPgidx += fts5GetVarint32(&a[iPgidx], iTermOff); iOff = iTermOff; if( iOff>n ){ - p->rc = FTS5_CORRUPT; + FTS5_CORRUPT_ITER(p, pIter); return; } @@ -246824,7 +249775,7 @@ static void fts5LeafSeek( iOff = iTermOff; if( iOff>=n ){ - p->rc = FTS5_CORRUPT; + FTS5_CORRUPT_ITER(p, pIter); return; } @@ -246846,7 +249797,7 @@ static void fts5LeafSeek( iPgidx = 
(u32)pIter->pLeaf->szLeaf; iPgidx += fts5GetVarint32(&pIter->pLeaf->p[iPgidx], iOff); if( iOff<4 || (i64)iOff>=pIter->pLeaf->szLeaf ){ - p->rc = FTS5_CORRUPT; + FTS5_CORRUPT_ITER(p, pIter); return; }else{ nKeep = 0; @@ -246861,7 +249812,7 @@ static void fts5LeafSeek( search_success: if( (i64)iOff+nNew>n || nNew<1 ){ - p->rc = FTS5_CORRUPT; + FTS5_CORRUPT_ITER(p, pIter); return; } pIter->iLeafOffset = iOff + nNew; @@ -247326,7 +250277,7 @@ static void fts5SegIterGotoPage( assert( iLeafPgno>pIter->iLeafPgno ); if( iLeafPgno>pIter->pSeg->pgnoLast ){ - p->rc = FTS5_CORRUPT; + FTS5_CORRUPT_IDX(p); }else{ fts5DataRelease(pIter->pNextLeaf); pIter->pNextLeaf = 0; @@ -247341,7 +250292,7 @@ static void fts5SegIterGotoPage( u8 *a = pIter->pLeaf->p; int n = pIter->pLeaf->szLeaf; if( iOff<4 || iOff>=n ){ - p->rc = FTS5_CORRUPT; + FTS5_CORRUPT_IDX(p); }else{ iOff += fts5GetVarint(&a[iOff], (u64*)&pIter->iRowid); pIter->iLeafOffset = iOff; @@ -247820,7 +250771,7 @@ static void fts5ChunkIterate( if( nRem<=0 ){ break; }else if( pSeg->pSeg==0 ){ - p->rc = FTS5_CORRUPT; + FTS5_CORRUPT_IDX(p); return; }else{ pgno++; @@ -248923,7 +251874,7 @@ static void fts5TrimSegments(Fts5Index *p, Fts5Iter *pIter){ ** a single page has been assigned to more than one segment. In ** this case a prior iteration of this loop may have corrupted the ** segment currently being trimmed. 
*/ - p->rc = FTS5_CORRUPT; + FTS5_CORRUPT_ROWID(p, iLeafRowid); }else{ fts5BufferZero(&buf); fts5BufferGrow(&p->rc, &buf, pData->nn); @@ -249390,7 +252341,7 @@ static void fts5SecureDeleteOverflow( }else if( bDetailNone ){ break; }else if( iNext>=pLeaf->szLeaf || pLeaf->nnszLeaf || iNext<4 ){ - p->rc = FTS5_CORRUPT; + FTS5_CORRUPT_ROWID(p, iRowid); break; }else{ int nShift = iNext - 4; @@ -249410,7 +252361,7 @@ static void fts5SecureDeleteOverflow( i1 += fts5GetVarint32(&aPg[i1], iFirst); if( iFirstrc = FTS5_CORRUPT; + FTS5_CORRUPT_ROWID(p, iRowid); break; } aIdx = sqlite3Fts5MallocZero(&p->rc, (pLeaf->nn-pLeaf->szLeaf)+2); @@ -249633,14 +252584,14 @@ static void fts5DoSecureDelete( nSuffix = (nPrefix2 + nSuffix2) - nPrefix; if( (iKeyOff+nSuffix)>iPgIdx || (iNextOff+nSuffix2)>iPgIdx ){ - p->rc = FTS5_CORRUPT; + FTS5_CORRUPT_IDX(p); }else{ if( iKey!=1 ){ iOff += sqlite3Fts5PutVarint(&aPg[iOff], nPrefix); } iOff += sqlite3Fts5PutVarint(&aPg[iOff], nSuffix); if( nPrefix2>pSeg->term.n ){ - p->rc = FTS5_CORRUPT; + FTS5_CORRUPT_IDX(p); }else if( nPrefix2>nPrefix ){ memcpy(&aPg[iOff], &pSeg->term.p[nPrefix], nPrefix2-nPrefix); iOff += (nPrefix2-nPrefix); @@ -250064,7 +253015,7 @@ static Fts5Structure *fts5IndexOptimizeStruct( } nByte += (((i64)pStruct->nLevel)+1) * sizeof(Fts5StructureLevel); - assert( nByte==SZ_FTS5STRUCTURE(pStruct->nLevel+2) ); + assert( nByte==(i64)SZ_FTS5STRUCTURE(pStruct->nLevel+2) ); pNew = (Fts5Structure*)sqlite3Fts5MallocZero(&p->rc, nByte); if( pNew ){ @@ -250433,7 +253384,7 @@ static void fts5MergePrefixLists( } if( pHead==0 || pHead->pNext==0 ){ - p->rc = FTS5_CORRUPT; + FTS5_CORRUPT_IDX(p); break; } @@ -250470,7 +253421,7 @@ static void fts5MergePrefixLists( assert_nc( tmp.n+nTail<=nTmp ); assert( tmp.n+nTail<=nTmp+nMerge*10 ); if( tmp.n+nTail>nTmp-FTS5_DATA_ZERO_PADDING ){ - if( p->rc==SQLITE_OK ) p->rc = FTS5_CORRUPT; + if( p->rc==SQLITE_OK ) FTS5_CORRUPT_IDX(p); break; } fts5BufferSafeAppendVarint(&out, (tmp.n+nTail) * 2); @@ -251039,11 
+253990,14 @@ static int sqlite3Fts5IndexRollback(Fts5Index *p){ */ static int sqlite3Fts5IndexReinit(Fts5Index *p){ Fts5Structure *pTmp; - u8 tmpSpace[SZ_FTS5STRUCTURE(1)]; + union { + Fts5Structure sFts; + u8 tmpSpace[SZ_FTS5STRUCTURE(1)]; + } uFts; fts5StructureInvalidate(p); fts5IndexDiscardData(p); - pTmp = (Fts5Structure*)tmpSpace; - memset(pTmp, 0, SZ_FTS5STRUCTURE(1)); + pTmp = &uFts.sFts; + memset(uFts.tmpSpace, 0, sizeof(uFts.tmpSpace)); if( p->pConfig->bContentlessDelete ){ pTmp->nOriginCntr = 1; } @@ -252503,19 +255457,27 @@ static int fts5TestUtf8(const char *z, int n){ /* ** This function is also purely an internal test. It does not contribute to ** FTS functionality, or even the integrity-check, in any way. +** +** This function sets output variable (*pbFail) to true if the test fails. Or +** leaves it unchanged if the test succeeds. */ static void fts5TestTerm( Fts5Index *p, Fts5Buffer *pPrev, /* Previous term */ const char *z, int n, /* Possibly new term to test */ u64 expected, - u64 *pCksum + u64 *pCksum, + int *pbFail ){ int rc = p->rc; if( pPrev->n==0 ){ fts5BufferSet(&rc, pPrev, n, (const u8*)z); }else - if( rc==SQLITE_OK && (pPrev->n!=n || memcmp(pPrev->p, z, n)) ){ + if( *pbFail==0 + && rc==SQLITE_OK + && (pPrev->n!=n || memcmp(pPrev->p, z, n)) + && (p->pHash==0 || p->pHash->nEntry==0) + ){ u64 cksum3 = *pCksum; const char *zTerm = (const char*)&pPrev->p[1]; /* term sans prefix-byte */ int nTerm = pPrev->n-1; /* Size of zTerm in bytes */ @@ -252565,7 +255527,7 @@ static void fts5TestTerm( fts5BufferSet(&rc, pPrev, n, (const u8*)z); if( rc==SQLITE_OK && cksum3!=expected ){ - rc = FTS5_CORRUPT; + *pbFail = 1; } *pCksum = cksum3; } @@ -252574,7 +255536,7 @@ static void fts5TestTerm( #else # define fts5TestDlidxReverse(x,y,z) -# define fts5TestTerm(u,v,w,x,y,z) +# define fts5TestTerm(t,u,v,w,x,y,z) #endif /* @@ -252599,14 +255561,17 @@ static void fts5IndexIntegrityCheckEmpty( for(i=iFirst; p->rc==SQLITE_OK && i<=iLast; i++){ Fts5Data *pLeaf = 
fts5DataRead(p, FTS5_SEGMENT_ROWID(pSeg->iSegid, i)); if( pLeaf ){ - if( !fts5LeafIsTermless(pLeaf) ) p->rc = FTS5_CORRUPT; - if( i>=iNoRowid && 0!=fts5LeafFirstRowidOff(pLeaf) ) p->rc = FTS5_CORRUPT; + if( !fts5LeafIsTermless(pLeaf) + || (i>=iNoRowid && 0!=fts5LeafFirstRowidOff(pLeaf)) + ){ + FTS5_CORRUPT_ROWID(p, FTS5_SEGMENT_ROWID(pSeg->iSegid, i)); + } } fts5DataRelease(pLeaf); } } -static void fts5IntegrityCheckPgidx(Fts5Index *p, Fts5Data *pLeaf){ +static void fts5IntegrityCheckPgidx(Fts5Index *p, i64 iRowid, Fts5Data *pLeaf){ i64 iTermOff = 0; int ii; @@ -252624,12 +255589,12 @@ static void fts5IntegrityCheckPgidx(Fts5Index *p, Fts5Data *pLeaf){ iOff = iTermOff; if( iOff>=pLeaf->szLeaf ){ - p->rc = FTS5_CORRUPT; + FTS5_CORRUPT_ROWID(p, iRowid); }else if( iTermOff==nIncr ){ int nByte; iOff += fts5GetVarint32(&pLeaf->p[iOff], nByte); if( (iOff+nByte)>pLeaf->szLeaf ){ - p->rc = FTS5_CORRUPT; + FTS5_CORRUPT_ROWID(p, iRowid); }else{ fts5BufferSet(&p->rc, &buf1, nByte, &pLeaf->p[iOff]); } @@ -252638,7 +255603,7 @@ static void fts5IntegrityCheckPgidx(Fts5Index *p, Fts5Data *pLeaf){ iOff += fts5GetVarint32(&pLeaf->p[iOff], nKeep); iOff += fts5GetVarint32(&pLeaf->p[iOff], nByte); if( nKeep>buf1.n || (iOff+nByte)>pLeaf->szLeaf ){ - p->rc = FTS5_CORRUPT; + FTS5_CORRUPT_ROWID(p, iRowid); }else{ buf1.n = nKeep; fts5BufferAppendBlob(&p->rc, &buf1, nByte, &pLeaf->p[iOff]); @@ -252646,7 +255611,7 @@ static void fts5IntegrityCheckPgidx(Fts5Index *p, Fts5Data *pLeaf){ if( p->rc==SQLITE_OK ){ res = fts5BufferCompare(&buf1, &buf2); - if( res<=0 ) p->rc = FTS5_CORRUPT; + if( res<=0 ) FTS5_CORRUPT_ROWID(p, iRowid); } } fts5BufferSet(&p->rc, &buf2, buf1.n, buf1.p); @@ -252707,7 +255672,7 @@ static void fts5IndexIntegrityCheckSegment( ** entry even if all the terms are removed from it by secure-delete ** operations. 
*/ }else{ - p->rc = FTS5_CORRUPT; + FTS5_CORRUPT_ROWID(p, iRow); } }else{ @@ -252719,15 +255684,15 @@ static void fts5IndexIntegrityCheckSegment( iOff = fts5LeafFirstTermOff(pLeaf); iRowidOff = fts5LeafFirstRowidOff(pLeaf); if( iRowidOff>=iOff || iOff>=pLeaf->szLeaf ){ - p->rc = FTS5_CORRUPT; + FTS5_CORRUPT_ROWID(p, iRow); }else{ iOff += fts5GetVarint32(&pLeaf->p[iOff], nTerm); res = fts5Memcmp(&pLeaf->p[iOff], zIdxTerm, MIN(nTerm, nIdxTerm)); if( res==0 ) res = nTerm - nIdxTerm; - if( res<0 ) p->rc = FTS5_CORRUPT; + if( res<0 ) FTS5_CORRUPT_ROWID(p, iRow); } - fts5IntegrityCheckPgidx(p, pLeaf); + fts5IntegrityCheckPgidx(p, iRow, pLeaf); } fts5DataRelease(pLeaf); if( p->rc ) break; @@ -252757,7 +255722,7 @@ static void fts5IndexIntegrityCheckSegment( iKey = FTS5_SEGMENT_ROWID(iSegid, iPg); pLeaf = fts5DataRead(p, iKey); if( pLeaf ){ - if( fts5LeafFirstRowidOff(pLeaf)!=0 ) p->rc = FTS5_CORRUPT; + if( fts5LeafFirstRowidOff(pLeaf)!=0 ) FTS5_CORRUPT_ROWID(p, iKey); fts5DataRelease(pLeaf); } } @@ -252772,12 +255737,12 @@ static void fts5IndexIntegrityCheckSegment( int iRowidOff = fts5LeafFirstRowidOff(pLeaf); ASSERT_SZLEAF_OK(pLeaf); if( iRowidOff>=pLeaf->szLeaf ){ - p->rc = FTS5_CORRUPT; + FTS5_CORRUPT_ROWID(p, iKey); }else if( bSecureDelete==0 || iRowidOff>0 ){ i64 iDlRowid = fts5DlidxIterRowid(pDlidx); fts5GetVarint(&pLeaf->p[iRowidOff], (u64*)&iRowid); if( iRowidrc = FTS5_CORRUPT; + FTS5_CORRUPT_ROWID(p, iKey); } } fts5DataRelease(pLeaf); @@ -252829,6 +255794,7 @@ static int sqlite3Fts5IndexIntegrityCheck(Fts5Index *p, u64 cksum, int bUseCksum /* Used by extra internal tests only run if NDEBUG is not defined */ u64 cksum3 = 0; /* Checksum based on contents of indexes */ Fts5Buffer term = {0,0,0}; /* Buffer used to hold most recent term */ + int bTestFail = 0; #endif const int flags = FTS5INDEX_QUERY_NOOUTPUT; @@ -252871,7 +255837,7 @@ static int sqlite3Fts5IndexIntegrityCheck(Fts5Index *p, u64 cksum, int bUseCksum char *z = (char*)fts5MultiIterTerm(pIter, &n); /* If 
this is a new term, query for it. Update cksum3 with the results. */ - fts5TestTerm(p, &term, z, n, cksum2, &cksum3); + fts5TestTerm(p, &term, z, n, cksum2, &cksum3, &bTestFail); if( p->rc ) break; if( eDetail==FTS5_DETAIL_NONE ){ @@ -252889,15 +255855,26 @@ static int sqlite3Fts5IndexIntegrityCheck(Fts5Index *p, u64 cksum, int bUseCksum } } } - fts5TestTerm(p, &term, 0, 0, cksum2, &cksum3); + fts5TestTerm(p, &term, 0, 0, cksum2, &cksum3, &bTestFail); fts5MultiIterFree(pIter); - if( p->rc==SQLITE_OK && bUseCksum && cksum!=cksum2 ) p->rc = FTS5_CORRUPT; - - fts5StructureRelease(pStruct); + if( p->rc==SQLITE_OK && bUseCksum && cksum!=cksum2 ){ + p->rc = FTS5_CORRUPT; + sqlite3Fts5ConfigErrmsg(p->pConfig, + "fts5: checksum mismatch for table \"%s\"", p->pConfig->zName + ); + } #ifdef SQLITE_DEBUG + /* In SQLITE_DEBUG builds, expensive extra checks were run as part of + ** the integrity-check above. If no other errors were detected, but one + ** of these tests failed, set the result to SQLITE_CORRUPT_VTAB here. 
*/ + if( p->rc==SQLITE_OK && bTestFail ){ + p->rc = FTS5_CORRUPT; + } fts5BufferFree(&term); #endif + + fts5StructureRelease(pStruct); fts5BufferFree(&poslist); return fts5IndexReturn(p); } @@ -254241,6 +257218,17 @@ static void fts5SetUniqueFlag(sqlite3_index_info *pIdxInfo){ #endif } +static void fts5SetEstimatedRows(sqlite3_index_info *pIdxInfo, i64 nRow){ +#if SQLITE_VERSION_NUMBER>=3008002 +#ifndef SQLITE_CORE + if( sqlite3_libversion_number()>=3008002 ) +#endif + { + pIdxInfo->estimatedRows = nRow; + } +#endif +} + static int fts5UsePatternMatch( Fts5Config *pConfig, struct sqlite3_index_constraint *p @@ -254376,7 +257364,7 @@ static int fts5BestIndexMethod(sqlite3_vtab *pVTab, sqlite3_index_info *pInfo){ nSeenMatch++; idxStr[iIdxStr++] = 'M'; sqlite3_snprintf(6, &idxStr[iIdxStr], "%d", iCol); - idxStr += strlen(&idxStr[iIdxStr]); + iIdxStr += (int)strlen(&idxStr[iIdxStr]); assert( idxStr[iIdxStr]=='\0' ); } pInfo->aConstraintUsage[i].argvIndex = ++iCons; @@ -254395,6 +257383,7 @@ static int fts5BestIndexMethod(sqlite3_vtab *pVTab, sqlite3_index_info *pInfo){ idxStr[iIdxStr++] = '='; bSeenEq = 1; pInfo->aConstraintUsage[i].argvIndex = ++iCons; + pInfo->aConstraintUsage[i].omit = 1; } } } @@ -254442,17 +257431,21 @@ static int fts5BestIndexMethod(sqlite3_vtab *pVTab, sqlite3_index_info *pInfo){ /* Calculate the estimated cost based on the flags set in idxFlags. */ if( bSeenEq ){ - pInfo->estimatedCost = nSeenMatch ? 1000.0 : 10.0; - if( nSeenMatch==0 ) fts5SetUniqueFlag(pInfo); - }else if( bSeenLt && bSeenGt ){ - pInfo->estimatedCost = nSeenMatch ? 5000.0 : 250000.0; - }else if( bSeenLt || bSeenGt ){ - pInfo->estimatedCost = nSeenMatch ? 7500.0 : 750000.0; + pInfo->estimatedCost = nSeenMatch ? 1000.0 : 25.0; + fts5SetUniqueFlag(pInfo); + fts5SetEstimatedRows(pInfo, 1); }else{ - pInfo->estimatedCost = nSeenMatch ? 10000.0 : 1000000.0; - } - for(i=1; iestimatedCost *= 0.4; + if( bSeenLt && bSeenGt ){ + pInfo->estimatedCost = nSeenMatch ? 
5000.0 : 750000.0; + }else if( bSeenLt || bSeenGt ){ + pInfo->estimatedCost = nSeenMatch ? 7500.0 : 2250000.0; + }else{ + pInfo->estimatedCost = nSeenMatch ? 10000.0 : 3000000.0; + } + for(i=1; iestimatedCost *= 0.4; + } + fts5SetEstimatedRows(pInfo, (i64)(pInfo->estimatedCost / 4.0)); } pInfo->idxNum = idxFlags; @@ -254651,7 +257644,9 @@ static int fts5CursorReseek(Fts5Cursor *pCsr, int *pbSkip){ int bDesc = pCsr->bDesc; i64 iRowid = sqlite3Fts5ExprRowid(pCsr->pExpr); - rc = sqlite3Fts5ExprFirst(pCsr->pExpr, pTab->p.pIndex, iRowid, bDesc); + rc = sqlite3Fts5ExprFirst( + pCsr->pExpr, pTab->p.pIndex, iRowid, pCsr->iLastRowid, bDesc + ); if( rc==SQLITE_OK && iRowid!=sqlite3Fts5ExprRowid(pCsr->pExpr) ){ *pbSkip = 1; } @@ -254823,7 +257818,9 @@ static int fts5CursorFirstSorted( static int fts5CursorFirst(Fts5FullTable *pTab, Fts5Cursor *pCsr, int bDesc){ int rc; Fts5Expr *pExpr = pCsr->pExpr; - rc = sqlite3Fts5ExprFirst(pExpr, pTab->p.pIndex, pCsr->iFirstRowid, bDesc); + rc = sqlite3Fts5ExprFirst( + pExpr, pTab->p.pIndex, pCsr->iFirstRowid, pCsr->iLastRowid, bDesc + ); if( sqlite3Fts5ExprEof(pExpr) ){ CsrFlagSet(pCsr, FTS5CSR_EOF); } @@ -257308,7 +260305,7 @@ static void fts5SourceIdFunc( ){ assert( nArg==0 ); UNUSED_PARAM2(nArg, apUnused); - sqlite3_result_text(pCtx, "fts5: 2025-07-30 19:33:53 4d8adfb30e03f9cf27f800a2c1ba3c48fb4ca1b08b0f5ed59a4d5ecbf45e20a3", -1, SQLITE_TRANSIENT); + sqlite3_result_text(pCtx, "fts5: 2025-11-28 17:28:25 281fc0e9afc38674b9b0991943b9e9d1e64c6cbdb133d35f6f5c87ff6af38a88", -1, SQLITE_TRANSIENT); } /* @@ -257331,9 +260328,9 @@ static void fts5LocaleFunc( sqlite3_value **apArg /* Function arguments */ ){ const char *zLocale = 0; - int nLocale = 0; + i64 nLocale = 0; const char *zText = 0; - int nText = 0; + i64 nText = 0; assert( nArg==2 ); UNUSED_PARAM(nArg); @@ -257350,10 +260347,10 @@ static void fts5LocaleFunc( Fts5Global *p = (Fts5Global*)sqlite3_user_data(pCtx); u8 *pBlob = 0; u8 *pCsr = 0; - int nBlob = 0; + i64 nBlob = 0; nBlob = 
FTS5_LOCALE_HDR_SIZE + nLocale + 1 + nText; - pBlob = (u8*)sqlite3_malloc(nBlob); + pBlob = (u8*)sqlite3_malloc64(nBlob); if( pBlob==0 ){ sqlite3_result_error_nomem(pCtx); return; @@ -257431,8 +260428,9 @@ static int fts5IntegrityMethod( " FTS5 table %s.%s: %s", zSchema, zTabname, sqlite3_errstr(rc)); } + }else if( (rc&0xff)==SQLITE_CORRUPT ){ + rc = SQLITE_OK; } - sqlite3Fts5IndexCloseReader(pTab->p.pIndex); pTab->p.pConfig->pzErrmsg = 0; @@ -262128,7 +265126,12 @@ static int fts5VocabOpenMethod( return rc; } +/* +** Restore cursor pCsr to the state it was in immediately after being +** created by the xOpen() method. +*/ static void fts5VocabResetCursor(Fts5VocabCursor *pCsr){ + int nCol = pCsr->pFts5->pConfig->nCol; pCsr->rowid = 0; sqlite3Fts5IterClose(pCsr->pIter); sqlite3Fts5StructureRelease(pCsr->pStruct); @@ -262138,6 +265141,12 @@ static void fts5VocabResetCursor(Fts5VocabCursor *pCsr){ pCsr->nLeTerm = -1; pCsr->zLeTerm = 0; pCsr->bEof = 0; + pCsr->iCol = 0; + pCsr->iInstPos = 0; + pCsr->iInstOff = 0; + pCsr->colUsed = 0; + memset(pCsr->aCnt, 0, sizeof(i64)*nCol); + memset(pCsr->aDoc, 0, sizeof(i64)*nCol); } /* diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.h b/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.h index c34235d84dc..fcb9c45394f 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.h +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.h @@ -147,9 +147,12 @@ extern "C" { ** [sqlite3_libversion_number()], [sqlite3_sourceid()], ** [sqlite_version()] and [sqlite_source_id()]. 
*/ -#define SQLITE_VERSION "3.50.4" -#define SQLITE_VERSION_NUMBER 3050004 -#define SQLITE_SOURCE_ID "2025-07-30 19:33:53 4d8adfb30e03f9cf27f800a2c1ba3c48fb4ca1b08b0f5ed59a4d5ecbf45e20a3" +#define SQLITE_VERSION "3.51.1" +#define SQLITE_VERSION_NUMBER 3051001 +#define SQLITE_SOURCE_ID "2025-11-28 17:28:25 281fc0e9afc38674b9b0991943b9e9d1e64c6cbdb133d35f6f5c87ff6af38a88" +#define SQLITE_SCM_BRANCH "branch-3.51" +#define SQLITE_SCM_TAGS "release version-3.51.1" +#define SQLITE_SCM_DATETIME "2025-11-28T17:28:25.933Z" /* ** CAPI3REF: Run-Time Library Version Numbers @@ -169,9 +172,9 @@ extern "C" { ** assert( strcmp(sqlite3_libversion(),SQLITE_VERSION)==0 ); ** )^ ** -** ^The sqlite3_version[] string constant contains the text of [SQLITE_VERSION] -** macro. ^The sqlite3_libversion() function returns a pointer to the -** to the sqlite3_version[] string constant. The sqlite3_libversion() +** ^The sqlite3_version[] string constant contains the text of the +** [SQLITE_VERSION] macro. ^The sqlite3_libversion() function returns a +** pointer to the sqlite3_version[] string constant. The sqlite3_libversion() ** function is provided for use in DLLs since DLL users usually do not have ** direct access to string constants within the DLL. ^The ** sqlite3_libversion_number() function returns an integer equal to @@ -371,7 +374,7 @@ typedef int (*sqlite3_callback)(void*,int,char**, char**); ** without having to use a lot of C code. ** ** ^The sqlite3_exec() interface runs zero or more UTF-8 encoded, -** semicolon-separate SQL statements passed into its 2nd argument, +** semicolon-separated SQL statements passed into its 2nd argument, ** in the context of the [database connection] passed in as its 1st ** argument. 
^If the callback function of the 3rd argument to ** sqlite3_exec() is not NULL, then it is invoked for each result row @@ -404,7 +407,7 @@ typedef int (*sqlite3_callback)(void*,int,char**, char**); ** result row is NULL then the corresponding string pointer for the ** sqlite3_exec() callback is a NULL pointer. ^The 4th argument to the ** sqlite3_exec() callback is an array of pointers to strings where each -** entry represents the name of corresponding result column as obtained +** entry represents the name of a corresponding result column as obtained ** from [sqlite3_column_name()]. ** ** ^If the 2nd parameter to sqlite3_exec() is a NULL pointer, a pointer @@ -498,6 +501,9 @@ SQLITE_API int sqlite3_exec( #define SQLITE_ERROR_MISSING_COLLSEQ (SQLITE_ERROR | (1<<8)) #define SQLITE_ERROR_RETRY (SQLITE_ERROR | (2<<8)) #define SQLITE_ERROR_SNAPSHOT (SQLITE_ERROR | (3<<8)) +#define SQLITE_ERROR_RESERVESIZE (SQLITE_ERROR | (4<<8)) +#define SQLITE_ERROR_KEY (SQLITE_ERROR | (5<<8)) +#define SQLITE_ERROR_UNABLE (SQLITE_ERROR | (6<<8)) #define SQLITE_IOERR_READ (SQLITE_IOERR | (1<<8)) #define SQLITE_IOERR_SHORT_READ (SQLITE_IOERR | (2<<8)) #define SQLITE_IOERR_WRITE (SQLITE_IOERR | (3<<8)) @@ -532,6 +538,8 @@ SQLITE_API int sqlite3_exec( #define SQLITE_IOERR_DATA (SQLITE_IOERR | (32<<8)) #define SQLITE_IOERR_CORRUPTFS (SQLITE_IOERR | (33<<8)) #define SQLITE_IOERR_IN_PAGE (SQLITE_IOERR | (34<<8)) +#define SQLITE_IOERR_BADKEY (SQLITE_IOERR | (35<<8)) +#define SQLITE_IOERR_CODEC (SQLITE_IOERR | (36<<8)) #define SQLITE_LOCKED_SHAREDCACHE (SQLITE_LOCKED | (1<<8)) #define SQLITE_LOCKED_VTAB (SQLITE_LOCKED | (2<<8)) #define SQLITE_BUSY_RECOVERY (SQLITE_BUSY | (1<<8)) @@ -590,7 +598,7 @@ SQLITE_API int sqlite3_exec( ** Note in particular that passing the SQLITE_OPEN_EXCLUSIVE flag into ** [sqlite3_open_v2()] does *not* cause the underlying database file ** to be opened using O_EXCL. 
Passing SQLITE_OPEN_EXCLUSIVE into -** [sqlite3_open_v2()] has historically be a no-op and might become an +** [sqlite3_open_v2()] has historically been a no-op and might become an ** error in future versions of SQLite. */ #define SQLITE_OPEN_READONLY 0x00000001 /* Ok for sqlite3_open_v2() */ @@ -684,7 +692,7 @@ SQLITE_API int sqlite3_exec( ** SQLite uses one of these integer values as the second ** argument to calls it makes to the xLock() and xUnlock() methods ** of an [sqlite3_io_methods] object. These values are ordered from -** lest restrictive to most restrictive. +** least restrictive to most restrictive. ** ** The argument to xLock() is always SHARED or higher. The argument to ** xUnlock is either SHARED or NONE. @@ -925,7 +933,7 @@ struct sqlite3_io_methods { ** connection. See also [SQLITE_FCNTL_FILE_POINTER]. ** **
  • [[SQLITE_FCNTL_SYNC_OMITTED]] -** No longer in use. +** The SQLITE_FCNTL_SYNC_OMITTED file-control is no longer used. ** **
  • [[SQLITE_FCNTL_SYNC]] ** The [SQLITE_FCNTL_SYNC] opcode is generated internally by SQLite and @@ -1000,7 +1008,7 @@ struct sqlite3_io_methods { ** **
  • [[SQLITE_FCNTL_VFSNAME]] ** ^The [SQLITE_FCNTL_VFSNAME] opcode can be used to obtain the names of -** all [VFSes] in the VFS stack. The names are of all VFS shims and the +** all [VFSes] in the VFS stack. The names of all VFS shims and the ** final bottom-level VFS are written into memory obtained from ** [sqlite3_malloc()] and the result is stored in the char* variable ** that the fourth parameter of [sqlite3_file_control()] points to. @@ -1014,7 +1022,7 @@ struct sqlite3_io_methods { ** ^The [SQLITE_FCNTL_VFS_POINTER] opcode finds a pointer to the top-level ** [VFSes] currently in use. ^(The argument X in ** sqlite3_file_control(db,SQLITE_FCNTL_VFS_POINTER,X) must be -** of type "[sqlite3_vfs] **". This opcodes will set *X +** of type "[sqlite3_vfs] **". This opcode will set *X ** to a pointer to the top-level VFS.)^ ** ^When there are multiple VFS shims in the stack, this opcode finds the ** upper-most shim only. @@ -1204,7 +1212,7 @@ struct sqlite3_io_methods { **
  • [[SQLITE_FCNTL_EXTERNAL_READER]] ** The EXPERIMENTAL [SQLITE_FCNTL_EXTERNAL_READER] opcode is used to detect ** whether or not there is a database client in another process with a wal-mode -** transaction open on the database or not. It is only available on unix.The +** transaction open on the database or not. It is only available on unix. The ** (void*) argument passed with this file-control should be a pointer to a ** value of type (int). The integer value is set to 1 if the database is a wal ** mode database and there exists at least one client in another process that @@ -1222,6 +1230,15 @@ struct sqlite3_io_methods { ** database is not a temp db, then the [SQLITE_FCNTL_RESET_CACHE] file-control ** purges the contents of the in-memory page cache. If there is an open ** transaction, or if the db is a temp-db, this opcode is a no-op, not an error. +** +**
  • [[SQLITE_FCNTL_FILESTAT]] +** The [SQLITE_FCNTL_FILESTAT] opcode returns low-level diagnostic information +** about the [sqlite3_file] objects used to access the database and journal files +** for the given schema. The fourth parameter to [sqlite3_file_control()] +** should be an initialized [sqlite3_str] pointer. JSON text describing +** various aspects of the sqlite3_file object is appended to the sqlite3_str. +** The SQLITE_FCNTL_FILESTAT opcode is usually a no-op, unless compile-time +** options are used to enable it. ** */ #define SQLITE_FCNTL_LOCKSTATE 1 @@ -1267,6 +1284,7 @@ struct sqlite3_io_methods { #define SQLITE_FCNTL_RESET_CACHE 42 #define SQLITE_FCNTL_NULL_IO 43 #define SQLITE_FCNTL_BLOCK_ON_CONNECT 44 +#define SQLITE_FCNTL_FILESTAT 45 /* deprecated names */ #define SQLITE_GET_LOCKPROXYFILE SQLITE_FCNTL_GET_LOCKPROXYFILE @@ -1629,7 +1647,7 @@ struct sqlite3_vfs { ** SQLite interfaces so that an application usually does not need to ** invoke sqlite3_initialize() directly. For example, [sqlite3_open()] ** calls sqlite3_initialize() so the SQLite library will be automatically -** initialized when [sqlite3_open()] is called if it has not be initialized +** initialized when [sqlite3_open()] is called if it has not been initialized ** already. ^However, if SQLite is compiled with the [SQLITE_OMIT_AUTOINIT] ** compile-time option, then the automatic calls to sqlite3_initialize() ** are omitted and the application must call sqlite3_initialize() directly @@ -1886,21 +1904,21 @@ struct sqlite3_mem_methods { ** The [sqlite3_mem_methods] ** structure is filled with the currently defined memory allocation routines.)^ ** This option can be used to overload the default memory allocation -** routines with a wrapper that simulations memory allocation failure or +** routines with a wrapper that simulates memory allocation failure or ** tracks memory usage, for example.
  • ** ** [[SQLITE_CONFIG_SMALL_MALLOC]]
    SQLITE_CONFIG_SMALL_MALLOC
    -**
    ^The SQLITE_CONFIG_SMALL_MALLOC option takes single argument of +**
    ^The SQLITE_CONFIG_SMALL_MALLOC option takes a single argument of ** type int, interpreted as a boolean, which if true provides a hint to ** SQLite that it should avoid large memory allocations if possible. ** SQLite will run faster if it is free to make large memory allocations, -** but some application might prefer to run slower in exchange for +** but some applications might prefer to run slower in exchange for ** guarantees about memory fragmentation that are possible if large ** allocations are avoided. This hint is normally off. **
    ** ** [[SQLITE_CONFIG_MEMSTATUS]]
    SQLITE_CONFIG_MEMSTATUS
    -**
    ^The SQLITE_CONFIG_MEMSTATUS option takes single argument of type int, +**
    ^The SQLITE_CONFIG_MEMSTATUS option takes a single argument of type int, ** interpreted as a boolean, which enables or disables the collection of ** memory allocation statistics. ^(When memory allocation statistics are ** disabled, the following SQLite interfaces become non-operational: @@ -1945,7 +1963,7 @@ struct sqlite3_mem_methods { ** ^If pMem is NULL and N is non-zero, then each database connection ** does an initial bulk allocation for page cache memory ** from [sqlite3_malloc()] sufficient for N cache lines if N is positive or -** of -1024*N bytes if N is negative, . ^If additional +** of -1024*N bytes if N is negative. ^If additional ** page cache memory is needed beyond what is provided by the initial ** allocation, then SQLite goes to [sqlite3_malloc()] separately for each ** additional cache line.
    @@ -1974,7 +1992,7 @@ struct sqlite3_mem_methods { **
    ^(The SQLITE_CONFIG_MUTEX option takes a single argument which is a ** pointer to an instance of the [sqlite3_mutex_methods] structure. ** The argument specifies alternative low-level mutex routines to be used -** in place the mutex routines built into SQLite.)^ ^SQLite makes a copy of +** in place of the mutex routines built into SQLite.)^ ^SQLite makes a copy of ** the content of the [sqlite3_mutex_methods] structure before the call to ** [sqlite3_config()] returns. ^If SQLite is compiled with ** the [SQLITE_THREADSAFE | SQLITE_THREADSAFE=0] compile-time option then @@ -2016,7 +2034,7 @@ struct sqlite3_mem_methods { ** ** [[SQLITE_CONFIG_GETPCACHE2]]
    SQLITE_CONFIG_GETPCACHE2
    **
    ^(The SQLITE_CONFIG_GETPCACHE2 option takes a single argument which -** is a pointer to an [sqlite3_pcache_methods2] object. SQLite copies of +** is a pointer to an [sqlite3_pcache_methods2] object. SQLite copies ** the current page cache implementation into that object.)^
    ** ** [[SQLITE_CONFIG_LOG]]
    SQLITE_CONFIG_LOG
    @@ -2033,7 +2051,7 @@ struct sqlite3_mem_methods { ** the logger function is a copy of the first parameter to the corresponding ** [sqlite3_log()] call and is intended to be a [result code] or an ** [extended result code]. ^The third parameter passed to the logger is -** log message after formatting via [sqlite3_snprintf()]. +** a log message after formatting via [sqlite3_snprintf()]. ** The SQLite logging interface is not reentrant; the logger function ** supplied by the application must not invoke any SQLite interface. ** In a multi-threaded application, the application-defined logger @@ -2224,7 +2242,7 @@ struct sqlite3_mem_methods { ** These constants are the available integer configuration options that ** can be passed as the second parameter to the [sqlite3_db_config()] interface. ** -** The [sqlite3_db_config()] interface is a var-args functions. It takes a +** The [sqlite3_db_config()] interface is a var-args function. It takes a ** variable number of parameters, though always at least two. The number of ** parameters passed into sqlite3_db_config() depends on which of these ** constants is given as the second parameter. This documentation page @@ -2336,17 +2354,20 @@ struct sqlite3_mem_methods { ** ** [[SQLITE_DBCONFIG_ENABLE_FTS3_TOKENIZER]] **
    SQLITE_DBCONFIG_ENABLE_FTS3_TOKENIZER
    -**
    ^This option is used to enable or disable the -** [fts3_tokenizer()] function which is part of the -** [FTS3] full-text search engine extension. -** There must be two additional arguments. -** The first argument is an integer which is 0 to disable fts3_tokenizer() or -** positive to enable fts3_tokenizer() or negative to leave the setting -** unchanged. -** The second parameter is a pointer to an integer into which -** is written 0 or 1 to indicate whether fts3_tokenizer is disabled or enabled -** following this call. The second parameter may be a NULL pointer, in -** which case the new setting is not reported back.
    +**
    ^This option is used to enable or disable using the +** [fts3_tokenizer()] function - part of the [FTS3] full-text search engine +** extension - without using bound parameters as the parameters. Doing so +** is disabled by default. There must be two additional arguments. The first +** argument is an integer. If it is passed 0, then using fts3_tokenizer() +** without bound parameters is disabled. If it is passed a positive value, +** then calling fts3_tokenizer without bound parameters is enabled. If it +** is passed a negative value, this setting is not modified - this can be +** used to query for the current setting. The second parameter is a pointer +** to an integer into which is written 0 or 1 to indicate the current value +** of this setting (after it is modified, if applicable). The second +** parameter may be a NULL pointer, in which case the value of the setting +** is not reported back. Refer to [FTS3] documentation for further details. +**
    ** ** [[SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION]] **
    SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION
    @@ -2358,8 +2379,8 @@ struct sqlite3_mem_methods { ** When the first argument to this interface is 1, then only the C-API is ** enabled and the SQL function remains disabled. If the first argument to ** this interface is 0, then both the C-API and the SQL function are disabled. -** If the first argument is -1, then no changes are made to state of either the -** C-API or the SQL function. +** If the first argument is -1, then no changes are made to the state of either +** the C-API or the SQL function. ** The second parameter is a pointer to an integer into which ** is written 0 or 1 to indicate whether [sqlite3_load_extension()] interface ** is disabled or enabled following this call. The second parameter may @@ -2477,7 +2498,7 @@ struct sqlite3_mem_methods { ** [[SQLITE_DBCONFIG_LEGACY_ALTER_TABLE]] **
    SQLITE_DBCONFIG_LEGACY_ALTER_TABLE
    **
    The SQLITE_DBCONFIG_LEGACY_ALTER_TABLE option activates or deactivates -** the legacy behavior of the [ALTER TABLE RENAME] command such it +** the legacy behavior of the [ALTER TABLE RENAME] command such that it ** behaves as it did prior to [version 3.24.0] (2018-06-04). See the ** "Compatibility Notice" on the [ALTER TABLE RENAME documentation] for ** additional information. This feature can also be turned on and off @@ -2526,7 +2547,7 @@ struct sqlite3_mem_methods { **
    SQLITE_DBCONFIG_LEGACY_FILE_FORMAT
    **
    The SQLITE_DBCONFIG_LEGACY_FILE_FORMAT option activates or deactivates ** the legacy file format flag. When activated, this flag causes all newly -** created database file to have a schema format version number (the 4-byte +** created database files to have a schema format version number (the 4-byte ** integer found at offset 44 into the database header) of 1. This in turn ** means that the resulting database file will be readable and writable by ** any SQLite version back to 3.0.0 ([dateof:3.0.0]). Without this setting, @@ -2553,7 +2574,7 @@ struct sqlite3_mem_methods { ** the database handle both when the SQL statement is prepared and when it ** is stepped. The flag is set (collection of statistics is enabled) ** by default.

    This option takes two arguments: an integer and a pointer to -** an integer.. The first argument is 1, 0, or -1 to enable, disable, or +** an integer. The first argument is 1, 0, or -1 to enable, disable, or ** leave unchanged the statement scanstatus option. If the second argument ** is not NULL, then the value of the statement scanstatus setting after ** processing the first argument is written into the integer that the second @@ -2596,8 +2617,8 @@ struct sqlite3_mem_methods { **

    The SQLITE_DBCONFIG_ENABLE_ATTACH_WRITE option enables or disables the ** ability of the [ATTACH DATABASE] SQL command to open a database for writing. ** This capability is enabled by default. Applications can disable or -** reenable this capability using the current DBCONFIG option. If the -** the this capability is disabled, the [ATTACH] command will still work, +** reenable this capability using the current DBCONFIG option. If +** this capability is disabled, the [ATTACH] command will still work, ** but the database will be opened read-only. If this option is disabled, ** then the ability to create a new database using [ATTACH] is also disabled, ** regardless of the value of the [SQLITE_DBCONFIG_ENABLE_ATTACH_CREATE] @@ -2631,7 +2652,7 @@ struct sqlite3_mem_methods { ** **

    Most of the SQLITE_DBCONFIG options take two arguments, so that the ** overall call to [sqlite3_db_config()] has a total of four parameters. -** The first argument (the third parameter to sqlite3_db_config()) is a integer. +** The first argument (the third parameter to sqlite3_db_config()) is an integer. ** The second argument is a pointer to an integer. If the first argument is 1, ** then the option becomes enabled. If the first integer argument is 0, then the ** option is disabled. If the first argument is -1, then the option setting @@ -2921,7 +2942,7 @@ SQLITE_API int sqlite3_is_interrupted(sqlite3*); ** ^These routines return 0 if the statement is incomplete. ^If a ** memory allocation fails, then SQLITE_NOMEM is returned. ** -** ^These routines do not parse the SQL statements thus +** ^These routines do not parse the SQL statements and thus ** will not detect syntactically incorrect SQL. ** ** ^(If SQLite has not been initialized using [sqlite3_initialize()] prior @@ -3038,7 +3059,7 @@ SQLITE_API int sqlite3_busy_timeout(sqlite3*, int ms); ** indefinitely if possible. The results of passing any other negative value ** are undefined. ** -** Internally, each SQLite database handle store two timeout values - the +** Internally, each SQLite database handle stores two timeout values - the ** busy-timeout (used for rollback mode databases, or if the VFS does not ** support blocking locks) and the setlk-timeout (used for blocking locks ** on wal-mode databases). The sqlite3_busy_timeout() method sets both @@ -3068,7 +3089,7 @@ SQLITE_API int sqlite3_setlk_timeout(sqlite3*, int ms, int flags); ** This is a legacy interface that is preserved for backwards compatibility. ** Use of this interface is not recommended. ** -** Definition: A result table is memory data structure created by the +** Definition: A result table is a memory data structure created by the ** [sqlite3_get_table()] interface. 
A result table records the ** complete query results from one or more queries. ** @@ -3211,7 +3232,7 @@ SQLITE_API char *sqlite3_vsnprintf(int,char*,const char*, va_list); ** ^Calling sqlite3_free() with a pointer previously returned ** by sqlite3_malloc() or sqlite3_realloc() releases that memory so ** that it might be reused. ^The sqlite3_free() routine is -** a no-op if is called with a NULL pointer. Passing a NULL pointer +** a no-op if it is called with a NULL pointer. Passing a NULL pointer ** to sqlite3_free() is harmless. After being freed, memory ** should neither be read nor written. Even reading previously freed ** memory might result in a segmentation fault or other severe error. @@ -3229,13 +3250,13 @@ SQLITE_API char *sqlite3_vsnprintf(int,char*,const char*, va_list); ** sqlite3_free(X). ** ^sqlite3_realloc(X,N) returns a pointer to a memory allocation ** of at least N bytes in size or NULL if insufficient memory is available. -** ^If M is the size of the prior allocation, then min(N,M) bytes -** of the prior allocation are copied into the beginning of buffer returned +** ^If M is the size of the prior allocation, then min(N,M) bytes of the +** prior allocation are copied into the beginning of the buffer returned ** by sqlite3_realloc(X,N) and the prior allocation is freed. ** ^If sqlite3_realloc(X,N) returns NULL and N is positive, then the ** prior allocation is not freed. ** -** ^The sqlite3_realloc64(X,N) interfaces works the same as +** ^The sqlite3_realloc64(X,N) interface works the same as ** sqlite3_realloc(X,N) except that N is a 64-bit unsigned integer instead ** of a 32-bit signed integer. ** @@ -3285,7 +3306,7 @@ SQLITE_API sqlite3_uint64 sqlite3_msize(void*); ** was last reset. 
^The values returned by [sqlite3_memory_used()] and ** [sqlite3_memory_highwater()] include any overhead ** added by SQLite in its implementation of [sqlite3_malloc()], -** but not overhead added by the any underlying system library +** but not overhead added by any underlying system library ** routines that [sqlite3_malloc()] may call. ** ** ^The memory high-water mark is reset to the current value of @@ -3737,7 +3758,7 @@ SQLITE_API void sqlite3_progress_handler(sqlite3*, int, int(*)(void*), void*); ** there is no harm in trying.) ** ** ^(

    [SQLITE_OPEN_SHAREDCACHE]
    -**
    The database is opened [shared cache] enabled, overriding +**
    The database is opened with [shared cache] enabled, overriding ** the default shared cache setting provided by ** [sqlite3_enable_shared_cache()].)^ ** The [use of shared cache mode is discouraged] and hence shared cache @@ -3745,7 +3766,7 @@ SQLITE_API void sqlite3_progress_handler(sqlite3*, int, int(*)(void*), void*); ** this option is a no-op. ** ** ^(
    [SQLITE_OPEN_PRIVATECACHE]
    -**
    The database is opened [shared cache] disabled, overriding +**
    The database is opened with [shared cache] disabled, overriding ** the default shared cache setting provided by ** [sqlite3_enable_shared_cache()].)^ ** @@ -4163,7 +4184,7 @@ SQLITE_API void sqlite3_free_filename(sqlite3_filename); ** subsequent calls to other SQLite interface functions.)^ ** ** ^The sqlite3_errstr(E) interface returns the English-language text -** that describes the [result code] E, as UTF-8, or NULL if E is not an +** that describes the [result code] E, as UTF-8, or NULL if E is not a ** result code for which a text error message is available. ** ^(Memory to hold the error message string is managed internally ** and must not be freed by the application)^. @@ -4171,7 +4192,7 @@ SQLITE_API void sqlite3_free_filename(sqlite3_filename); ** ^If the most recent error references a specific token in the input ** SQL, the sqlite3_error_offset() interface returns the byte offset ** of the start of that token. ^The byte offset returned by -** sqlite3_error_offset() assumes that the input SQL is UTF8. +** sqlite3_error_offset() assumes that the input SQL is UTF-8. ** ^If the most recent error does not reference a specific token in the input ** SQL, then the sqlite3_error_offset() function returns -1. ** @@ -4196,6 +4217,34 @@ SQLITE_API const void *sqlite3_errmsg16(sqlite3*); SQLITE_API const char *sqlite3_errstr(int); SQLITE_API int sqlite3_error_offset(sqlite3 *db); +/* +** CAPI3REF: Set Error Codes And Message +** METHOD: sqlite3 +** +** Set the error code of the database handle passed as the first argument +** to errcode, and the error message to a copy of nul-terminated string +** zErrMsg. If zErrMsg is passed NULL, then the error message is set to +** the default message associated with the supplied error code. Subsequent +** calls to [sqlite3_errcode()] and [sqlite3_errmsg()] and similar will +** return the values set by this routine in place of what was previously +** set by SQLite itself. 
+** +** This function returns SQLITE_OK if the error code and error message are +** successfully set, SQLITE_NOMEM if an OOM occurs, and SQLITE_MISUSE if +** the database handle is NULL or invalid. +** +** The error code and message set by this routine remain in effect until +** they are changed, either by another call to this routine or until they are +** changed by SQLite itself to reflect the result of some subsequent +** API call. +** +** This function is intended for use by SQLite extensions or wrappers. The +** idea is that an extension or wrapper can use this routine to set error +** messages and error codes and thus behave more like a core SQLite +** feature from the point of view of an application. +*/ +SQLITE_API int sqlite3_set_errmsg(sqlite3 *db, int errcode, const char *zErrMsg); + /* ** CAPI3REF: Prepared Statement Object ** KEYWORDS: {prepared statement} {prepared statements} @@ -4270,8 +4319,8 @@ SQLITE_API int sqlite3_limit(sqlite3*, int id, int newVal); ** ** These constants define various performance limits ** that can be lowered at run-time using [sqlite3_limit()]. -** The synopsis of the meanings of the various limits is shown below. -** Additional information is available at [limits | Limits in SQLite]. +** A concise description of these limits follows, and additional information +** is available at [limits | Limits in SQLite]. ** **
    ** [[SQLITE_LIMIT_LENGTH]] ^(
    SQLITE_LIMIT_LENGTH
    @@ -4336,7 +4385,7 @@ SQLITE_API int sqlite3_limit(sqlite3*, int id, int newVal); /* ** CAPI3REF: Prepare Flags ** -** These constants define various flags that can be passed into +** These constants define various flags that can be passed into the ** "prepFlags" parameter of the [sqlite3_prepare_v3()] and ** [sqlite3_prepare16_v3()] interfaces. ** @@ -4423,7 +4472,7 @@ SQLITE_API int sqlite3_limit(sqlite3*, int id, int newVal); ** there is a small performance advantage to passing an nByte parameter that ** is the number of bytes in the input string including ** the nul-terminator. -** Note that nByte measure the length of the input in bytes, not +** Note that nByte measures the length of the input in bytes, not ** characters, even for the UTF-16 interfaces. ** ** ^If pzTail is not NULL then *pzTail is made to point to the first byte @@ -4557,7 +4606,7 @@ SQLITE_API int sqlite3_prepare16_v3( ** ** ^The sqlite3_expanded_sql() interface returns NULL if insufficient memory ** is available to hold the result, or if the result would exceed the -** the maximum string length determined by the [SQLITE_LIMIT_LENGTH]. +** maximum string length determined by the [SQLITE_LIMIT_LENGTH]. ** ** ^The [SQLITE_TRACE_SIZE_LIMIT] compile-time option limits the size of ** bound parameter expansions. ^The [SQLITE_OMIT_TRACE] compile-time @@ -4745,7 +4794,7 @@ typedef struct sqlite3_value sqlite3_value; ** ** The context in which an SQL function executes is stored in an ** sqlite3_context object. ^A pointer to an sqlite3_context object -** is always first parameter to [application-defined SQL functions]. +** is always the first parameter to [application-defined SQL functions]. 
** The application-defined SQL function implementation will pass this ** pointer through into calls to [sqlite3_result_int | sqlite3_result()], ** [sqlite3_aggregate_context()], [sqlite3_user_data()], @@ -4869,9 +4918,11 @@ typedef struct sqlite3_context sqlite3_context; ** associated with the pointer P of type T. ^D is either a NULL pointer or ** a pointer to a destructor function for P. ^SQLite will invoke the ** destructor D with a single argument of P when it is finished using -** P. The T parameter should be a static string, preferably a string -** literal. The sqlite3_bind_pointer() routine is part of the -** [pointer passing interface] added for SQLite 3.20.0. +** P, even if the call to sqlite3_bind_pointer() fails. Due to a +** historical design quirk, results are undefined if D is +** SQLITE_TRANSIENT. The T parameter should be a static string, +** preferably a string literal. The sqlite3_bind_pointer() routine is +** part of the [pointer passing interface] added for SQLite 3.20.0. ** ** ^If any of the sqlite3_bind_*() routines are called with a NULL pointer ** for the [prepared statement] or with a prepared statement for which @@ -5482,7 +5533,7 @@ SQLITE_API int sqlite3_column_type(sqlite3_stmt*, int iCol); ** ** ^The sqlite3_finalize() function is called to delete a [prepared statement]. ** ^If the most recent evaluation of the statement encountered no errors -** or if the statement is never been evaluated, then sqlite3_finalize() returns +** or if the statement has never been evaluated, then sqlite3_finalize() returns ** SQLITE_OK. ^If the most recent evaluation of statement S failed, then ** sqlite3_finalize(S) returns the appropriate [error code] or ** [extended error code]. @@ -5714,7 +5765,7 @@ SQLITE_API int sqlite3_create_window_function( /* ** CAPI3REF: Text Encodings ** -** These constant define integer codes that represent the various +** These constants define integer codes that represent the various ** text encodings supported by SQLite. 
*/ #define SQLITE_UTF8 1 /* IMP: R-37514-35566 */ @@ -5806,7 +5857,7 @@ SQLITE_API int sqlite3_create_window_function( ** result. ** Every function that invokes [sqlite3_result_subtype()] should have this ** property. If it does not, then the call to [sqlite3_result_subtype()] -** might become a no-op if the function is used as term in an +** might become a no-op if the function is used as a term in an ** [expression index]. On the other hand, SQL functions that never invoke ** [sqlite3_result_subtype()] should avoid setting this property, as the ** purpose of this property is to disable certain optimizations that are @@ -5933,7 +5984,7 @@ SQLITE_API SQLITE_DEPRECATED int sqlite3_memory_alarm(void(*)(void*,sqlite3_int6 ** sqlite3_value_nochange(X) interface returns true if and only if ** the column corresponding to X is unchanged by the UPDATE operation ** that the xUpdate method call was invoked to implement and if -** and the prior [xColumn] method call that was invoked to extracted +** the prior [xColumn] method call that was invoked to extract ** the value for that column returned without setting a result (probably ** because it queried [sqlite3_vtab_nochange()] and found that the column ** was unchanging). ^Within an [xUpdate] method, any value for which @@ -6206,6 +6257,7 @@ SQLITE_API void sqlite3_set_auxdata(sqlite3_context*, int N, void*, void (*)(voi ** or a NULL pointer if there were no prior calls to ** sqlite3_set_clientdata() with the same values of D and N. ** Names are compared using strcmp() and are thus case sensitive. +** It returns 0 on success and SQLITE_NOMEM on allocation failure. ** ** If P and X are both non-NULL, then the destructor X is invoked with ** argument P on the first of the following occurrences: @@ -8882,9 +8934,18 @@ SQLITE_API int sqlite3_status64( ** ^The sqlite3_db_status() routine returns SQLITE_OK on success and a ** non-zero [error code] on failure. 
** +** ^The sqlite3_db_status64(D,O,C,H,R) routine works exactly the same +** way as the sqlite3_db_status(D,O,C,H,R) routine except that the C and H +** parameters are pointers to 64-bit integers (type: sqlite3_int64) instead +** of pointers to 32-bit integers, which allows larger status values +** to be returned. If a status value exceeds 2,147,483,647 then +** sqlite3_db_status() will truncate the value whereas sqlite3_db_status64() +** will return the full value. +** ** See also: [sqlite3_status()] and [sqlite3_stmt_status()]. */ SQLITE_API int sqlite3_db_status(sqlite3*, int op, int *pCur, int *pHiwtr, int resetFlg); +SQLITE_API int sqlite3_db_status64(sqlite3*,int,sqlite3_int64*,sqlite3_int64*,int); /* ** CAPI3REF: Status Parameters for database connections @@ -8981,6 +9042,10 @@ SQLITE_API int sqlite3_db_status(sqlite3*, int op, int *pCur, int *pHiwtr, int r ** If an IO or other error occurs while writing a page to disk, the effect ** on subsequent SQLITE_DBSTATUS_CACHE_WRITE requests is undefined.)^ ^The ** highwater mark associated with SQLITE_DBSTATUS_CACHE_WRITE is always 0. +**

    +** ^(There is overlap between the quantities measured by this parameter +** (SQLITE_DBSTATUS_CACHE_WRITE) and SQLITE_DBSTATUS_TEMPBUF_SPILL. +** Resetting one will reduce the other.)^ **

    ** ** [[SQLITE_DBSTATUS_CACHE_SPILL]] ^(
    SQLITE_DBSTATUS_CACHE_SPILL
    @@ -8996,6 +9061,18 @@ SQLITE_API int sqlite3_db_status(sqlite3*, int op, int *pCur, int *pHiwtr, int r **
    This parameter returns zero for the current value if and only if ** all foreign key constraints (deferred or immediate) have been ** resolved.)^ ^The highwater mark is always 0. +** +** [[SQLITE_DBSTATUS_TEMPBUF_SPILL]] ^(
    SQLITE_DBSTATUS_TEMPBUF_SPILL
    +**
    ^(This parameter returns the number of bytes written to temporary +** files on disk that could have been kept in memory had sufficient memory +** been available. This value includes writes to intermediate tables that +** are part of complex queries, external sorts that spill to disk, and +** writes to TEMP tables.)^ +** ^The highwater mark is always 0. +**

    +** ^(There is overlap between the quantities measured by this parameter +** (SQLITE_DBSTATUS_TEMPBUF_SPILL) and SQLITE_DBSTATUS_CACHE_WRITE. +** Resetting one will reduce the other.)^ **

    ** */ @@ -9012,7 +9089,8 @@ SQLITE_API int sqlite3_db_status(sqlite3*, int op, int *pCur, int *pHiwtr, int r #define SQLITE_DBSTATUS_DEFERRED_FKS 10 #define SQLITE_DBSTATUS_CACHE_USED_SHARED 11 #define SQLITE_DBSTATUS_CACHE_SPILL 12 -#define SQLITE_DBSTATUS_MAX 12 /* Largest defined DBSTATUS */ +#define SQLITE_DBSTATUS_TEMPBUF_SPILL 13 +#define SQLITE_DBSTATUS_MAX 13 /* Largest defined DBSTATUS */ /* @@ -9777,7 +9855,7 @@ SQLITE_API void sqlite3_log(int iErrCode, const char *zFormat, ...); ** is the number of pages currently in the write-ahead log file, ** including those that were just committed. ** -** The callback function should normally return [SQLITE_OK]. ^If an error +** ^The callback function should normally return [SQLITE_OK]. ^If an error ** code is returned, that error will propagate back up through the ** SQLite code base to cause the statement that provoked the callback ** to report an error, though the commit will have still occurred. If the @@ -9785,13 +9863,26 @@ SQLITE_API void sqlite3_log(int iErrCode, const char *zFormat, ...); ** that does not correspond to any valid SQLite error code, the results ** are undefined. ** -** A single database handle may have at most a single write-ahead log callback -** registered at one time. ^Calling [sqlite3_wal_hook()] replaces any -** previously registered write-ahead log callback. ^The return value is -** a copy of the third parameter from the previous call, if any, or 0. -** ^Note that the [sqlite3_wal_autocheckpoint()] interface and the -** [wal_autocheckpoint pragma] both invoke [sqlite3_wal_hook()] and will -** overwrite any prior [sqlite3_wal_hook()] settings. +** ^A single database handle may have at most a single write-ahead log +** callback registered at one time. ^Calling [sqlite3_wal_hook()] +** replaces the default behavior or previously registered write-ahead +** log callback. +** +** ^The return value is a copy of the third parameter from the +** previous call, if any, or 0. 
+** +** ^The [sqlite3_wal_autocheckpoint()] interface and the +** [wal_autocheckpoint pragma] both invoke [sqlite3_wal_hook()] and +** will overwrite any prior [sqlite3_wal_hook()] settings. +** +** ^If a write-ahead log callback is set using this function then +** [sqlite3_wal_checkpoint_v2()] or [PRAGMA wal_checkpoint] +** should be invoked periodically to keep the write-ahead log file +** from growing without bound. +** +** ^Passing a NULL pointer for the callback disables automatic +** checkpointing entirely. To re-enable the default behavior, call +** sqlite3_wal_autocheckpoint(db,1000) or use [PRAGMA wal_checkpoint]. */ SQLITE_API void *sqlite3_wal_hook( sqlite3*, @@ -9808,7 +9899,7 @@ SQLITE_API void *sqlite3_wal_hook( ** to automatically [checkpoint] ** after committing a transaction if there are N or ** more frames in the [write-ahead log] file. ^Passing zero or -** a negative value as the nFrame parameter disables automatic +** a negative value as the N parameter disables automatic ** checkpoints entirely. ** ** ^The callback registered by this function replaces any existing callback @@ -9824,9 +9915,10 @@ SQLITE_API void *sqlite3_wal_hook( ** ** ^Every new [database connection] defaults to having the auto-checkpoint ** enabled with a threshold of 1000 or [SQLITE_DEFAULT_WAL_AUTOCHECKPOINT] -** pages. The use of this interface -** is only necessary if the default setting is found to be suboptimal -** for a particular application. +** pages. +** +** ^The use of this interface is only necessary if the default setting +** is found to be suboptimal for a particular application. */ SQLITE_API int sqlite3_wal_autocheckpoint(sqlite3 *db, int N); @@ -9891,6 +9983,11 @@ SQLITE_API int sqlite3_wal_checkpoint(sqlite3 *db, const char *zDb); ** ^This mode works the same way as SQLITE_CHECKPOINT_RESTART with the ** addition that it also truncates the log file to zero bytes just prior ** to a successful return. +** +**
    SQLITE_CHECKPOINT_NOOP
    +** ^This mode always checkpoints zero frames. The only reason to invoke +** a NOOP checkpoint is to access the values returned by +** sqlite3_wal_checkpoint_v2() via output parameters *pnLog and *pnCkpt. ** ** ** ^If pnLog is not NULL, then *pnLog is set to the total number of frames in @@ -9961,6 +10058,7 @@ SQLITE_API int sqlite3_wal_checkpoint_v2( ** See the [sqlite3_wal_checkpoint_v2()] documentation for details on the ** meaning of each of these checkpoint modes. */ +#define SQLITE_CHECKPOINT_NOOP -1 /* Do no work at all */ #define SQLITE_CHECKPOINT_PASSIVE 0 /* Do as much as possible w/o blocking */ #define SQLITE_CHECKPOINT_FULL 1 /* Wait for writers, then checkpoint */ #define SQLITE_CHECKPOINT_RESTART 2 /* Like FULL but wait for readers */ @@ -10329,7 +10427,7 @@ SQLITE_API int sqlite3_vtab_in(sqlite3_index_info*, int iCons, int bHandle); **   ){ **   // do something with pVal **   } -**   if( rc!=SQLITE_OK ){ +**   if( rc!=SQLITE_DONE ){ **   // an error has occurred **   } ** )^ @@ -10788,7 +10886,7 @@ typedef struct sqlite3_snapshot { ** The [sqlite3_snapshot_get()] interface is only available when the ** [SQLITE_ENABLE_SNAPSHOT] compile-time option is used. */ -SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_get( +SQLITE_API int sqlite3_snapshot_get( sqlite3 *db, const char *zSchema, sqlite3_snapshot **ppSnapshot @@ -10837,7 +10935,7 @@ SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_get( ** The [sqlite3_snapshot_open()] interface is only available when the ** [SQLITE_ENABLE_SNAPSHOT] compile-time option is used. */ -SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_open( +SQLITE_API int sqlite3_snapshot_open( sqlite3 *db, const char *zSchema, sqlite3_snapshot *pSnapshot @@ -10854,7 +10952,7 @@ SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_open( ** The [sqlite3_snapshot_free()] interface is only available when the ** [SQLITE_ENABLE_SNAPSHOT] compile-time option is used. 
*/ -SQLITE_API SQLITE_EXPERIMENTAL void sqlite3_snapshot_free(sqlite3_snapshot*); +SQLITE_API void sqlite3_snapshot_free(sqlite3_snapshot*); /* ** CAPI3REF: Compare the ages of two snapshot handles. @@ -10881,7 +10979,7 @@ SQLITE_API SQLITE_EXPERIMENTAL void sqlite3_snapshot_free(sqlite3_snapshot*); ** This interface is only available if SQLite is compiled with the ** [SQLITE_ENABLE_SNAPSHOT] option. */ -SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_cmp( +SQLITE_API int sqlite3_snapshot_cmp( sqlite3_snapshot *p1, sqlite3_snapshot *p2 ); @@ -10909,7 +11007,7 @@ SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_cmp( ** This interface is only available if SQLite is compiled with the ** [SQLITE_ENABLE_SNAPSHOT] option. */ -SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_recover(sqlite3 *db, const char *zDb); +SQLITE_API int sqlite3_snapshot_recover(sqlite3 *db, const char *zDb); /* ** CAPI3REF: Serialize a database @@ -10983,12 +11081,13 @@ SQLITE_API unsigned char *sqlite3_serialize( ** ** The sqlite3_deserialize(D,S,P,N,M,F) interface causes the ** [database connection] D to disconnect from database S and then -** reopen S as an in-memory database based on the serialization contained -** in P. The serialized database P is N bytes in size. M is the size of -** the buffer P, which might be larger than N. If M is larger than N, and -** the SQLITE_DESERIALIZE_READONLY bit is not set in F, then SQLite is -** permitted to add content to the in-memory database as long as the total -** size does not exceed M bytes. +** reopen S as an in-memory database based on the serialization +** contained in P. If S is a NULL pointer, the main database is +** used. The serialized database P is N bytes in size. M is the size +** of the buffer P, which might be larger than N. 
If M is larger than +** N, and the SQLITE_DESERIALIZE_READONLY bit is not set in F, then +** SQLite is permitted to add content to the in-memory database as +** long as the total size does not exceed M bytes. ** ** If the SQLITE_DESERIALIZE_FREEONCLOSE bit is set in F, then SQLite will ** invoke sqlite3_free() on the serialization buffer when the database @@ -11055,6 +11154,54 @@ SQLITE_API int sqlite3_deserialize( #define SQLITE_DESERIALIZE_RESIZEABLE 2 /* Resize using sqlite3_realloc64() */ #define SQLITE_DESERIALIZE_READONLY 4 /* Database is read-only */ +/* +** CAPI3REF: Bind array values to the CARRAY table-valued function +** +** The sqlite3_carray_bind(S,I,P,N,F,X) interface binds an array value to +** one of the first argument of the [carray() table-valued function]. The +** S parameter is a pointer to the [prepared statement] that uses the carray() +** functions. I is the parameter index to be bound. P is a pointer to the +** array to be bound, and N is the number of eements in the array. The +** F argument is one of constants [SQLITE_CARRAY_INT32], [SQLITE_CARRAY_INT64], +** [SQLITE_CARRAY_DOUBLE], [SQLITE_CARRAY_TEXT], or [SQLITE_CARRAY_BLOB] to +** indicate the datatype of the array being bound. The X argument is not a +** NULL pointer, then SQLite will invoke the function X on the P parameter +** after it has finished using P, even if the call to +** sqlite3_carray_bind() fails. The special-case finalizer +** SQLITE_TRANSIENT has no effect here. 
+*/ +SQLITE_API int sqlite3_carray_bind( + sqlite3_stmt *pStmt, /* Statement to be bound */ + int i, /* Parameter index */ + void *aData, /* Pointer to array data */ + int nData, /* Number of data elements */ + int mFlags, /* CARRAY flags */ + void (*xDel)(void*) /* Destructor for aData */ +); + +/* +** CAPI3REF: Datatypes for the CARRAY table-valued function +** +** The fifth argument to the [sqlite3_carray_bind()] interface musts be +** one of the following constants, to specify the datatype of the array +** that is being bound into the [carray table-valued function]. +*/ +#define SQLITE_CARRAY_INT32 0 /* Data is 32-bit signed integers */ +#define SQLITE_CARRAY_INT64 1 /* Data is 64-bit signed integers */ +#define SQLITE_CARRAY_DOUBLE 2 /* Data is doubles */ +#define SQLITE_CARRAY_TEXT 3 /* Data is char* */ +#define SQLITE_CARRAY_BLOB 4 /* Data is struct iovec */ + +/* +** Versions of the above #defines that omit the initial SQLITE_, for +** legacy compatibility. +*/ +#define CARRAY_INT32 0 /* Data is 32-bit signed integers */ +#define CARRAY_INT64 1 /* Data is 64-bit signed integers */ +#define CARRAY_DOUBLE 2 /* Data is doubles */ +#define CARRAY_TEXT 3 /* Data is char* */ +#define CARRAY_BLOB 4 /* Data is struct iovec */ + /* ** Undo the hack that converts floating point types to integer for ** builds on processors without floating point support. @@ -12314,14 +12461,32 @@ SQLITE_API void sqlite3changegroup_delete(sqlite3_changegroup*); ** update the "main" database attached to handle db with the changes found in ** the changeset passed via the second and third arguments. ** +** All changes made by these functions are enclosed in a savepoint transaction. +** If any other error (aside from a constraint failure when attempting to +** write to the target database) occurs, then the savepoint transaction is +** rolled back, restoring the target database to its original state, and an +** SQLite error code returned. 
Additionally, starting with version 3.51.0, +** an error code and error message that may be accessed using the +** [sqlite3_errcode()] and [sqlite3_errmsg()] APIs are left in the database +** handle. +** ** The fourth argument (xFilter) passed to these functions is the "filter -** callback". If it is not NULL, then for each table affected by at least one -** change in the changeset, the filter callback is invoked with -** the table name as the second argument, and a copy of the context pointer -** passed as the sixth argument as the first. If the "filter callback" -** returns zero, then no attempt is made to apply any changes to the table. -** Otherwise, if the return value is non-zero or the xFilter argument to -** is NULL, all changes related to the table are attempted. +** callback". This may be passed NULL, in which case all changes in the +** changeset are applied to the database. For sqlite3changeset_apply() and +** sqlite3_changeset_apply_v2(), if it is not NULL, then it is invoked once +** for each table affected by at least one change in the changeset. In this +** case the table name is passed as the second argument, and a copy of +** the context pointer passed as the sixth argument to apply() or apply_v2() +** as the first. If the "filter callback" returns zero, then no attempt is +** made to apply any changes to the table. Otherwise, if the return value is +** non-zero, all changes related to the table are attempted. +** +** For sqlite3_changeset_apply_v3(), the xFilter callback is invoked once +** per change. The second argument in this case is an sqlite3_changeset_iter +** that may be queried using the usual APIs for the details of the current +** change. If the "filter callback" returns zero in this case, then no attempt +** is made to apply the current change. If it returns non-zero, the change +** is applied. ** ** For each table that is not excluded by the filter callback, this function ** tests that the target database contains a compatible table. 
A table is @@ -12342,11 +12507,11 @@ SQLITE_API void sqlite3changegroup_delete(sqlite3_changegroup*); ** one such warning is issued for each table in the changeset. ** ** For each change for which there is a compatible table, an attempt is made -** to modify the table contents according to the UPDATE, INSERT or DELETE -** change. If a change cannot be applied cleanly, the conflict handler -** function passed as the fifth argument to sqlite3changeset_apply() may be -** invoked. A description of exactly when the conflict handler is invoked for -** each type of change is below. +** to modify the table contents according to each UPDATE, INSERT or DELETE +** change that is not excluded by a filter callback. If a change cannot be +** applied cleanly, the conflict handler function passed as the fifth argument +** to sqlite3changeset_apply() may be invoked. A description of exactly when +** the conflict handler is invoked for each type of change is below. ** ** Unlike the xFilter argument, xConflict may not be passed NULL. The results ** of passing anything other than a valid function pointer as the xConflict @@ -12442,12 +12607,6 @@ SQLITE_API void sqlite3changegroup_delete(sqlite3_changegroup*); ** This can be used to further customize the application's conflict ** resolution strategy. ** -** All changes made by these functions are enclosed in a savepoint transaction. -** If any other error (aside from a constraint failure when attempting to -** write to the target database) occurs, then the savepoint transaction is -** rolled back, restoring the target database to its original state, and an -** SQLite error code returned. 
-** ** If the output parameters (ppRebase) and (pnRebase) are non-NULL and ** the input is a changeset (not a patchset), then sqlite3changeset_apply_v2() ** may set (*ppRebase) to point to a "rebase" that may be used with the @@ -12497,6 +12656,23 @@ SQLITE_API int sqlite3changeset_apply_v2( void **ppRebase, int *pnRebase, /* OUT: Rebase data */ int flags /* SESSION_CHANGESETAPPLY_* flags */ ); +SQLITE_API int sqlite3changeset_apply_v3( + sqlite3 *db, /* Apply change to "main" db of this handle */ + int nChangeset, /* Size of changeset in bytes */ + void *pChangeset, /* Changeset blob */ + int(*xFilter)( + void *pCtx, /* Copy of sixth arg to _apply() */ + sqlite3_changeset_iter *p /* Handle describing change */ + ), + int(*xConflict)( + void *pCtx, /* Copy of sixth arg to _apply() */ + int eConflict, /* DATA, MISSING, CONFLICT, CONSTRAINT */ + sqlite3_changeset_iter *p /* Handle describing change and conflict */ + ), + void *pCtx, /* First argument passed to xConflict */ + void **ppRebase, int *pnRebase, /* OUT: Rebase data */ + int flags /* SESSION_CHANGESETAPPLY_* flags */ +); /* ** CAPI3REF: Flags for sqlite3changeset_apply_v2 @@ -12916,6 +13092,23 @@ SQLITE_API int sqlite3changeset_apply_v2_strm( void **ppRebase, int *pnRebase, int flags ); +SQLITE_API int sqlite3changeset_apply_v3_strm( + sqlite3 *db, /* Apply change to "main" db of this handle */ + int (*xInput)(void *pIn, void *pData, int *pnData), /* Input function */ + void *pIn, /* First arg for xInput */ + int(*xFilter)( + void *pCtx, /* Copy of sixth arg to _apply() */ + sqlite3_changeset_iter *p + ), + int(*xConflict)( + void *pCtx, /* Copy of sixth arg to _apply() */ + int eConflict, /* DATA, MISSING, CONFLICT, CONSTRAINT */ + sqlite3_changeset_iter *p /* Handle describing change and conflict */ + ), + void *pCtx, /* First argument passed to xConflict */ + void **ppRebase, int *pnRebase, + int flags +); SQLITE_API int sqlite3changeset_concat_strm( int (*xInputA)(void *pIn, void *pData, int *pnData), 
void *pInA, diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_column_metadata.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_column_metadata.go index 63659b46b62..9aff0b1bf31 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_column_metadata.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_column_metadata.go @@ -6,7 +6,7 @@ package sqlite3 /* #ifndef USE_LIBSQLITE3 #cgo CFLAGS: -DSQLITE_ENABLE_COLUMN_METADATA -#include +#include "sqlite3-binding.h" #else #include #endif diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_serialize.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_serialize.go index f1710c1c324..2d7fc0d7eb4 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_serialize.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_serialize.go @@ -5,7 +5,7 @@ package sqlite3 /* #ifndef USE_LIBSQLITE3 -#include +#include "sqlite3-binding.h" #else #include #endif diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3ext.h b/vendor/github.com/mattn/go-sqlite3/sqlite3ext.h index 3a5e0a4edb5..33eef8af62c 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3ext.h +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3ext.h @@ -373,6 +373,10 @@ struct sqlite3_api_routines { int (*set_clientdata)(sqlite3*, const char*, void*, void(*)(void*)); /* Version 3.50.0 and later */ int (*setlk_timeout)(sqlite3*,int,int); + /* Version 3.51.0 and later */ + int (*set_errmsg)(sqlite3*,int,const char*); + int (*db_status64)(sqlite3*,int,sqlite3_int64*,sqlite3_int64*,int); + }; /* @@ -708,6 +712,9 @@ typedef int (*sqlite3_loadext_entry)( #define sqlite3_set_clientdata sqlite3_api->set_clientdata /* Version 3.50.0 and later */ #define sqlite3_setlk_timeout sqlite3_api->setlk_timeout +/* Version 3.51.0 and later */ +#define sqlite3_set_errmsg sqlite3_api->set_errmsg +#define sqlite3_db_status64 sqlite3_api->db_status64 #endif /* !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION) */ #if !defined(SQLITE_CORE) && 
!defined(SQLITE_OMIT_LOAD_EXTENSION) diff --git a/vendor/github.com/minio/crc64nvme/crc64.go b/vendor/github.com/minio/crc64nvme/crc64.go index 40ac28c7655..ca34a48e09e 100644 --- a/vendor/github.com/minio/crc64nvme/crc64.go +++ b/vendor/github.com/minio/crc64nvme/crc64.go @@ -125,14 +125,19 @@ func update(crc uint64, p []byte) uint64 { p = p[align:] } runs := len(p) / 128 - crc = updateAsm(crc, p[:128*runs]) + if hasAsm512 && runs >= 8 { + // Use 512-bit wide instructions for >= 1KB. + crc = updateAsm512(crc, p[:128*runs]) + } else { + crc = updateAsm(crc, p[:128*runs]) + } return update(crc, p[128*runs:]) } buildSlicing8TablesOnce() crc = ^crc // table comparison is somewhat expensive, so avoid it for small sizes - for len(p) >= 64 { + if len(p) >= 64 { var helperTable = slicing8TableNVME // Update using slicing-by-8 for len(p) > 8 { diff --git a/vendor/github.com/minio/crc64nvme/crc64_amd64.go b/vendor/github.com/minio/crc64nvme/crc64_amd64.go index fc8538bc3e3..c741591a619 100644 --- a/vendor/github.com/minio/crc64nvme/crc64_amd64.go +++ b/vendor/github.com/minio/crc64nvme/crc64_amd64.go @@ -11,5 +11,7 @@ import ( ) var hasAsm = cpuid.CPU.Supports(cpuid.SSE2, cpuid.CLMUL, cpuid.SSE4) +var hasAsm512 = cpuid.CPU.Supports(cpuid.AVX512F, cpuid.VPCLMULQDQ, cpuid.AVX512VL, cpuid.CLMUL) func updateAsm(crc uint64, p []byte) (checksum uint64) +func updateAsm512(crc uint64, p []byte) (checksum uint64) diff --git a/vendor/github.com/minio/crc64nvme/crc64_amd64.s b/vendor/github.com/minio/crc64nvme/crc64_amd64.s index 9782321fd0c..acfea6a151b 100644 --- a/vendor/github.com/minio/crc64nvme/crc64_amd64.s +++ b/vendor/github.com/minio/crc64nvme/crc64_amd64.s @@ -155,3 +155,153 @@ skip128: NOTQ AX MOVQ AX, checksum+32(FP) RET + +// Constants, pre-splatted. 
+DATA ·asmConstantsPoly<>+0x00(SB)/8, $0xa1ca681e733f9c40 +DATA ·asmConstantsPoly<>+0x08(SB)/8, $0 +DATA ·asmConstantsPoly<>+0x10(SB)/8, $0xa1ca681e733f9c40 +DATA ·asmConstantsPoly<>+0x18(SB)/8, $0 +DATA ·asmConstantsPoly<>+0x20(SB)/8, $0xa1ca681e733f9c40 +DATA ·asmConstantsPoly<>+0x28(SB)/8, $0 +DATA ·asmConstantsPoly<>+0x30(SB)/8, $0xa1ca681e733f9c40 +DATA ·asmConstantsPoly<>+0x38(SB)/8, $0 +// Upper +DATA ·asmConstantsPoly<>+0x40(SB)/8, $0 +DATA ·asmConstantsPoly<>+0x48(SB)/8, $0x5f852fb61e8d92dc +DATA ·asmConstantsPoly<>+0x50(SB)/8, $0 +DATA ·asmConstantsPoly<>+0x58(SB)/8, $0x5f852fb61e8d92dc +DATA ·asmConstantsPoly<>+0x60(SB)/8, $0 +DATA ·asmConstantsPoly<>+0x68(SB)/8, $0x5f852fb61e8d92dc +DATA ·asmConstantsPoly<>+0x70(SB)/8, $0 +DATA ·asmConstantsPoly<>+0x78(SB)/8, $0x5f852fb61e8d92dc +GLOBL ·asmConstantsPoly<>(SB), (NOPTR+RODATA), $128 + +TEXT ·updateAsm512(SB), $0-40 + MOVQ crc+0(FP), AX // checksum + MOVQ p_base+8(FP), SI // start pointer + MOVQ p_len+16(FP), CX // length of buffer + NOTQ AX + SHRQ $7, CX + CMPQ CX, $1 + VPXORQ Z8, Z8, Z8 // Initialize ZMM8 to zero + JLT skip128 + + VMOVDQU64 0x00(SI), Z0 + VMOVDQU64 0x40(SI), Z4 + MOVQ $·asmConstantsPoly<>(SB), BX + VMOVQ AX, X8 + + // XOR initialization value into lower 64 bits of ZMM0 + VPXORQ Z8, Z0, Z0 + CMPQ CX, $1 + JE tail128 + + VMOVDQU64 0(BX), Z8 + VMOVDQU64 64(BX), Z9 + + PCALIGN $16 + +loop128: + VMOVDQU64 0x80(SI), Z1 + VMOVDQU64 0xc0(SI), Z5 + ADDQ $128, SI + + SUBQ $1, CX + VPCLMULQDQ $0x00, Z8, Z0, Z10 + VPCLMULQDQ $0x11, Z9, Z0, Z0 + VPTERNLOGD $0x96, Z1, Z10, Z0 // Combine results with xor into Z0 + + VPCLMULQDQ $0x00, Z8, Z4, Z10 + VPCLMULQDQ $0x11, Z9, Z4, Z4 + VPTERNLOGD $0x96, Z5, Z10, Z4 // Combine results with xor into Z4 + + CMPQ CX, $1 + JGT loop128 + +tail128: + // Extract X0 to X3 from ZMM0 + VEXTRACTF32X4 $1, Z0, X1 // X1: Second 128-bit lane + VEXTRACTF32X4 $2, Z0, X2 // X2: Third 128-bit lane + VEXTRACTF32X4 $3, Z0, X3 // X3: Fourth 128-bit lane + + // Extract X4 to X7 from 
ZMM4 + VEXTRACTF32X4 $1, Z4, X5 // X5: Second 128-bit lane + VEXTRACTF32X4 $2, Z4, X6 // X6: Third 128-bit lane + VEXTRACTF32X4 $3, Z4, X7 // X7: Fourth 128-bit lane + + MOVQ $0xd083dd594d96319d, AX + MOVQ AX, X11 + PCLMULQDQ $0x00, X0, X11 + MOVQ $0x946588403d4adcbc, AX + PINSRQ $0x1, AX, X12 + PCLMULQDQ $0x11, X12, X0 + PXOR X11, X7 + PXOR X0, X7 + MOVQ $0x3c255f5ebc414423, AX + MOVQ AX, X11 + PCLMULQDQ $0x00, X1, X11 + MOVQ $0x34f5a24e22d66e90, AX + PINSRQ $0x1, AX, X12 + PCLMULQDQ $0x11, X12, X1 + PXOR X11, X1 + PXOR X7, X1 + MOVQ $0x7b0ab10dd0f809fe, AX + MOVQ AX, X11 + PCLMULQDQ $0x00, X2, X11 + MOVQ $0x03363823e6e791e5, AX + PINSRQ $0x1, AX, X12 + PCLMULQDQ $0x11, X12, X2 + PXOR X11, X2 + PXOR X1, X2 + MOVQ $0x0c32cdb31e18a84a, AX + MOVQ AX, X11 + PCLMULQDQ $0x00, X3, X11 + MOVQ $0x62242240ace5045a, AX + PINSRQ $0x1, AX, X12 + PCLMULQDQ $0x11, X12, X3 + PXOR X11, X3 + PXOR X2, X3 + MOVQ $0xbdd7ac0ee1a4a0f0, AX + MOVQ AX, X11 + PCLMULQDQ $0x00, X4, X11 + MOVQ $0xa3ffdc1fe8e82a8b, AX + PINSRQ $0x1, AX, X12 + PCLMULQDQ $0x11, X12, X4 + PXOR X11, X4 + PXOR X3, X4 + MOVQ $0xb0bc2e589204f500, AX + MOVQ AX, X11 + PCLMULQDQ $0x00, X5, X11 + MOVQ $0xe1e0bb9d45d7a44c, AX + PINSRQ $0x1, AX, X12 + PCLMULQDQ $0x11, X12, X5 + PXOR X11, X5 + PXOR X4, X5 + MOVQ $0xeadc41fd2ba3d420, AX + MOVQ AX, X11 + PCLMULQDQ $0x00, X6, X11 + MOVQ $0x21e9761e252621ac, AX + PINSRQ $0x1, AX, X12 + PCLMULQDQ $0x11, X12, X6 + PXOR X11, X6 + PXOR X5, X6 + MOVQ AX, X5 + PCLMULQDQ $0x00, X6, X5 + PSHUFD $0xee, X6, X6 + PXOR X5, X6 + MOVQ $0x27ecfa329aef9f77, AX + MOVQ AX, X4 + PCLMULQDQ $0x00, X4, X6 + PEXTRQ $0, X6, BX + MOVQ $0x34d926535897936b, AX + MOVQ AX, X4 + PCLMULQDQ $0x00, X4, X6 + PXOR X5, X6 + PEXTRQ $1, X6, AX + XORQ BX, AX + +skip128: + NOTQ AX + MOVQ AX, checksum+32(FP) + VZEROUPPER + RET diff --git a/vendor/github.com/minio/crc64nvme/crc64_arm64.go b/vendor/github.com/minio/crc64nvme/crc64_arm64.go index 141ecf54d39..7e3ea913459 100644 --- 
a/vendor/github.com/minio/crc64nvme/crc64_arm64.go +++ b/vendor/github.com/minio/crc64nvme/crc64_arm64.go @@ -11,5 +11,7 @@ import ( ) var hasAsm = cpuid.CPU.Supports(cpuid.ASIMD, cpuid.PMULL, cpuid.SHA3) +var hasAsm512 = false func updateAsm(crc uint64, p []byte) (checksum uint64) +func updateAsm512(crc uint64, p []byte) (checksum uint64) { panic("should not be reached") } diff --git a/vendor/github.com/minio/crc64nvme/crc64_other.go b/vendor/github.com/minio/crc64nvme/crc64_other.go index 467958c69dd..ae260f7fbdb 100644 --- a/vendor/github.com/minio/crc64nvme/crc64_other.go +++ b/vendor/github.com/minio/crc64nvme/crc64_other.go @@ -7,5 +7,7 @@ package crc64nvme var hasAsm = false +var hasAsm512 = false -func updateAsm(crc uint64, p []byte) (checksum uint64) { panic("should not be reached") } +func updateAsm(crc uint64, p []byte) (checksum uint64) { panic("should not be reached") } +func updateAsm512(crc uint64, p []byte) (checksum uint64) { panic("should not be reached") } diff --git a/vendor/github.com/minio/highwayhash/highwayhash.go b/vendor/github.com/minio/highwayhash/highwayhash.go index 629fdf2e1b0..2e59a61f1bd 100644 --- a/vendor/github.com/minio/highwayhash/highwayhash.go +++ b/vendor/github.com/minio/highwayhash/highwayhash.go @@ -25,39 +25,64 @@ const ( Size64 = 8 ) +// These will error at compile time if the interface is not conformant. +var _ hash.Hash = &Digest{} +var _ hash.Hash = &Digest64{} + var errKeySize = errors.New("highwayhash: invalid key size") // New returns a hash.Hash computing the HighwayHash-256 checksum. // It returns a non-nil error if the key is not 32 bytes long. func New(key []byte) (hash.Hash, error) { - if len(key) != Size { - return nil, errKeySize - } - h := &digest{size: Size} - copy(h.key[:], key) - h.Reset() - return h, nil + return NewDigest(key) } // New128 returns a hash.Hash computing the HighwayHash-128 checksum. // It returns a non-nil error if the key is not 32 bytes long. 
func New128(key []byte) (hash.Hash, error) { + return NewDigest128(key) +} + +// New64 returns a hash.Hash64 computing the HighwayHash-64 checksum. +// It returns a non-nil error if the key is not 32 bytes long. +func New64(key []byte) (hash.Hash64, error) { + return NewDigest64(key) +} + +// NewDigest returns a *Digest that conforms to hash.Hash computing +// the HighwayHash-256 checksum. +// It returns a non-nil error if the key is not 32 bytes long. +func NewDigest(key []byte) (*Digest, error) { if len(key) != Size { return nil, errKeySize } - h := &digest{size: Size128} + h := &Digest{size: Size} copy(h.key[:], key) h.Reset() return h, nil } -// New64 returns a hash.Hash computing the HighwayHash-64 checksum. +// NewDigest128 returns a *Digest that conforms to hash.Hash computing +// the HighwayHash-128 checksum. // It returns a non-nil error if the key is not 32 bytes long. -func New64(key []byte) (hash.Hash64, error) { +func NewDigest128(key []byte) (*Digest, error) { if len(key) != Size { return nil, errKeySize } - h := new(digest64) + h := &Digest{size: Size128} + copy(h.key[:], key) + h.Reset() + return h, nil +} + +// NewDigest64 returns a *Digest that conforms to hash.Hash computing +// the HighwayHash-64 checksum. +// It returns a non-nil error if the key is not 32 bytes long. 
+func NewDigest64(key []byte) (*Digest64, error) { + if len(key) != Size { + return nil, errKeySize + } + h := new(Digest64) h.size = Size64 copy(h.key[:], key) h.Reset() @@ -130,9 +155,9 @@ func Sum64(data, key []byte) uint64 { return binary.LittleEndian.Uint64(hash[:]) } -type digest64 struct{ digest } +type Digest64 struct{ Digest } -func (d *digest64) Sum64() uint64 { +func (d *Digest64) Sum64() uint64 { state := d.state if d.offset > 0 { hashBuffer(&state, &d.buffer, d.offset) @@ -142,7 +167,7 @@ func (d *digest64) Sum64() uint64 { return binary.LittleEndian.Uint64(hash[:]) } -type digest struct { +type Digest struct { state [16]uint64 // v0 | v1 | mul0 | mul1 key, buffer [Size]byte @@ -151,16 +176,16 @@ type digest struct { size int } -func (d *digest) Size() int { return d.size } +func (d *Digest) Size() int { return d.size } -func (d *digest) BlockSize() int { return Size } +func (d *Digest) BlockSize() int { return Size } -func (d *digest) Reset() { +func (d *Digest) Reset() { initialize(&d.state, d.key[:]) d.offset = 0 } -func (d *digest) Write(p []byte) (n int, err error) { +func (d *Digest) Write(p []byte) (n int, err error) { n = len(p) if d.offset > 0 { remaining := Size - d.offset @@ -183,7 +208,7 @@ func (d *digest) Write(p []byte) (n int, err error) { return } -func (d *digest) Sum(b []byte) []byte { +func (d *Digest) Sum(b []byte) []byte { state := d.state if d.offset > 0 { hashBuffer(&state, &d.buffer, d.offset) diff --git a/vendor/github.com/minio/highwayhash/highwayhash_arm64.go b/vendor/github.com/minio/highwayhash/highwayhash_arm64.go index d94e482d2d7..dfff06e873d 100644 --- a/vendor/github.com/minio/highwayhash/highwayhash_arm64.go +++ b/vendor/github.com/minio/highwayhash/highwayhash_arm64.go @@ -24,7 +24,7 @@ func init() { if useSVE { if vl, _ := getVectorLength(); vl != 256 { // - // Since HighwahHash is designed for AVX2, + // Since HighwayHash is designed for AVX2, // SVE/SVE2 instructions only run correctly // for vector length of 256 
// diff --git a/vendor/github.com/minio/minio-go/v7/CLAUDE.md b/vendor/github.com/minio/minio-go/v7/CLAUDE.md new file mode 100644 index 00000000000..26ff953237b --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/CLAUDE.md @@ -0,0 +1,125 @@ +CLAUDE.md +========= + +This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository. + +Commands +-------- + +### Testing + +```bash +# Run all tests with race detection (requires MinIO server at localhost:9000) +SERVER_ENDPOINT=localhost:9000 ACCESS_KEY=minioadmin SECRET_KEY=minioadmin ENABLE_HTTPS=1 MINT_MODE=full go test -race -v ./... + +# Run tests without race detection +go test ./... + +# Run short tests only (no functional tests) +go test -short -race ./... + +# Run functional tests +go build -race functional_tests.go +SERVER_ENDPOINT=localhost:9000 ACCESS_KEY=minioadmin SECRET_KEY=minioadmin ENABLE_HTTPS=1 MINT_MODE=full ./functional_tests + +# Run functional tests without TLS +SERVER_ENDPOINT=localhost:9000 ACCESS_KEY=minioadmin SECRET_KEY=minioadmin ENABLE_HTTPS=0 MINT_MODE=full ./functional_tests +``` + +### Linting and Code Quality + +```bash +# Run all checks (lint, vet, test, examples, functional tests) +make checks + +# Run linter only +make lint + +# Run vet and staticcheck +make vet + +# Alternative: run golangci-lint directly +golangci-lint run --timeout=5m --config ./.golangci.yml +``` + +### Building Examples + +```bash +# Build all examples +make examples + +# Build a specific example +cd examples/s3 && go build -mod=mod putobject.go +``` + +Architecture +------------ + +### Core Client Structure + +The MinIO Go SDK is organized around a central `Client` struct (api.go:52) that implements Amazon S3 compatible methods. Key architectural patterns: + +1. **Modular API Organization**: API methods are split into logical files: + + - `api-bucket-*.go`: Bucket operations (lifecycle, encryption, versioning, etc.) 
+ - `api-object-*.go`: Object operations (legal hold, retention, tagging, etc.) + - `api-get-*.go`, `api-put-*.go`: GET and PUT operations + - `api-list.go`: Listing operations + - `api-stat.go`: Status/info operations + +2. **Credential Management**: The `pkg/credentials/` package provides various credential providers: + + - Static credentials + - Environment variables (AWS/MinIO) + - IAM roles + - STS (Security Token Service) variants + - File-based credentials + - Chain provider for fallback mechanisms + +3. **Request Signing**: The `pkg/signer/` package handles AWS signature versions: + + - V2 signatures (legacy) + - V4 signatures (standard) + - Streaming signatures for large uploads + +4. **Transport Layer**: Custom HTTP transport with: + + - Retry logic with configurable max retries + - Health status monitoring + - Tracing support via httptrace + - Bucket location caching (`bucketLocCache`\) + - Session caching for credentials + +5. **Helper Packages**: + + - `pkg/encrypt/`: Server-side encryption utilities + - `pkg/notification/`: Event notification handling + - `pkg/policy/`: Bucket policy management + - `pkg/lifecycle/`: Object lifecycle rules + - `pkg/tags/`: Object and bucket tagging + - `pkg/s3utils/`: S3 utility functions + - `pkg/kvcache/`: Key-value caching + - `pkg/singleflight/`: Deduplication of concurrent requests + +### Testing Strategy + +- Unit tests alongside implementation files (`*_test.go`\) +- Comprehensive functional tests in `functional_tests.go` requiring a live MinIO server +- Example programs in `examples/` directory demonstrating API usage +- Build tag `//go:build mint` for integration tests + +### Error Handling + +- Custom error types in `api-error-response.go` +- HTTP status code mapping +- Retry logic for transient failures +- Detailed error context preservation + +Important Patterns +------------------ + +1. **Context Usage**: All API methods accept `context.Context` for cancellation and timeout control +2. 
**Options Pattern**: Methods use Options structs for optional parameters (e.g., `PutObjectOptions`, `GetObjectOptions`\) +3. **Streaming Support**: Large file operations use io.Reader/Writer interfaces for memory efficiency +4. **Bucket Lookup Types**: Supports both path-style and virtual-host-style S3 URLs +5. **MD5/SHA256 Hashing**: Configurable hash functions for integrity checks via `md5Hasher` and `sha256Hasher` diff --git a/vendor/github.com/minio/minio-go/v7/CONTRIBUTING.md b/vendor/github.com/minio/minio-go/v7/CONTRIBUTING.md index 24522ef75a1..e976dd6befc 100644 --- a/vendor/github.com/minio/minio-go/v7/CONTRIBUTING.md +++ b/vendor/github.com/minio/minio-go/v7/CONTRIBUTING.md @@ -1,22 +1,23 @@ -### Developer Guidelines +### Developer Guidelines -``minio-go`` welcomes your contribution. To make the process as seamless as possible, we ask for the following: +`minio-go` welcomes your contribution. To make the process as seamless as possible, we ask for the following: -* Go ahead and fork the project and make your changes. We encourage pull requests to discuss code changes. - - Fork it - - Create your feature branch (git checkout -b my-new-feature) - - Commit your changes (git commit -am 'Add some feature') - - Push to the branch (git push origin my-new-feature) - - Create new Pull Request +- Go ahead and fork the project and make your changes. We encourage pull requests to discuss code changes. -* When you're ready to create a pull request, be sure to: - - Have test cases for the new code. If you have questions about how to do it, please ask in your pull request. - - Run `go fmt` - - Squash your commits into a single commit. `git rebase -i`. It's okay to force update your pull request. - - Make sure `go test -race ./...` and `go build` completes. - NOTE: go test runs functional tests and requires you to have a AWS S3 account. Set them as environment variables - ``ACCESS_KEY`` and ``SECRET_KEY``. 
To run shorter version of the tests please use ``go test -short -race ./...`` + - Fork it + - Create your feature branch (git checkout -b my-new-feature) + - Commit your changes (git commit -am 'Add some feature') + - Push to the branch (git push origin my-new-feature) + - Create new Pull Request -* Read [Effective Go](https://github.com/golang/go/wiki/CodeReviewComments) article from Golang project - - `minio-go` project is strictly conformant with Golang style - - if you happen to observe offending code, please feel free to send a pull request +- When you're ready to create a pull request, be sure to: + + - Have test cases for the new code. If you have questions about how to do it, please ask in your pull request. + - Run `go fmt` + - Squash your commits into a single commit. `git rebase -i`. It's okay to force update your pull request. + - Make sure `go test -race ./...` and `go build` completes. NOTE: go test runs functional tests and requires you to have a AWS S3 account. Set them as environment variables`ACCESS_KEY` and `SECRET_KEY`. 
To run shorter version of the tests please use `go test -short -race ./...` + +- Read [Effective Go](https://github.com/golang/go/wiki/CodeReviewComments) article from Golang project + + - `minio-go` project is strictly conformant with Golang style + - if you happen to observe offending code, please feel free to send a pull request diff --git a/vendor/github.com/minio/minio-go/v7/MAINTAINERS.md b/vendor/github.com/minio/minio-go/v7/MAINTAINERS.md index f640dfb9f86..9b189373f77 100644 --- a/vendor/github.com/minio/minio-go/v7/MAINTAINERS.md +++ b/vendor/github.com/minio/minio-go/v7/MAINTAINERS.md @@ -1,11 +1,15 @@ -# For maintainers only +For maintainers only +==================== -## Responsibilities +Responsibilities +---------------- Please go through this link [Maintainer Responsibility](https://gist.github.com/abperiasamy/f4d9b31d3186bbd26522) ### Making new releases + Tag and sign your release commit, additionally this step requires you to have access to MinIO's trusted private key. + ```sh $ export GNUPGHOME=/media/${USER}/minio/trusted $ git tag -s 4.0.0 @@ -14,6 +18,7 @@ $ git push --tags ``` ### Update version + Once release has been made update `libraryVersion` constant in `api.go` to next to be released version. ```sh @@ -22,14 +27,17 @@ $ grep libraryVersion api.go ``` Commit your changes + ``` $ git commit -a -m "Update version for next release" --author "MinIO Trusted " ``` ### Announce + Announce new release by adding release notes at https://github.com/minio/minio-go/releases from `trusted@min.io` account. Release notes requires two sections `highlights` and `changelog`. Highlights is a bulleted list of salient features in this release and Changelog contains list of all commits since the last release. To generate `changelog` + ```sh $ git log --no-color --pretty=format:'-%d %s (%cr) <%an>' .. 
``` diff --git a/vendor/github.com/minio/minio-go/v7/README.md b/vendor/github.com/minio/minio-go/v7/README.md index be7963c52e6..36c1004c9ac 100644 --- a/vendor/github.com/minio/minio-go/v7/README.md +++ b/vendor/github.com/minio/minio-go/v7/README.md @@ -1,13 +1,14 @@ -# MinIO Go Client SDK for Amazon S3 Compatible Cloud Storage [![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io) [![Sourcegraph](https://sourcegraph.com/github.com/minio/minio-go/-/badge.svg)](https://sourcegraph.com/github.com/minio/minio-go?badge) [![Apache V2 License](https://img.shields.io/badge/license-Apache%20V2-blue.svg)](https://github.com/minio/minio-go/blob/master/LICENSE) +MinIO Go Client SDK for Amazon S3 Compatible Cloud Storage [![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io) [![Sourcegraph](https://sourcegraph.com/github.com/minio/minio-go/-/badge.svg)](https://sourcegraph.com/github.com/minio/minio-go?badge) [![Apache V2 License](https://img.shields.io/badge/license-Apache%20V2-blue.svg)](https://github.com/minio/minio-go/blob/master/LICENSE) +================================================================================================================================================================================================================================================================================================================================================================================================================== The MinIO Go Client SDK provides straightforward APIs to access any Amazon S3 compatible object storage. -This Quickstart Guide covers how to install the MinIO client SDK, connect to MinIO, and create a sample file uploader. -For a complete list of APIs and examples, see the [godoc documentation](https://pkg.go.dev/github.com/minio/minio-go/v7) or [Go Client API Reference](https://min.io/docs/minio/linux/developers/go/API.html). 
+This Quickstart Guide covers how to install the MinIO client SDK, connect to MinIO, and create a sample file uploader. For a complete list of APIs and examples, see the [godoc documentation](https://pkg.go.dev/github.com/minio/minio-go/v7) or [Go Client API Reference](https://min.io/docs/minio/linux/developers/go/API.html). These examples presume a working [Go development environment](https://golang.org/doc/install) and the [MinIO `mc` command line tool](https://min.io/docs/minio/linux/reference/minio-mc.html). -## Download from Github +Download from Github +-------------------- From your project directory: @@ -15,12 +16,13 @@ From your project directory: go get github.com/minio/minio-go/v7 ``` -## Initialize a MinIO Client Object +Initialize a MinIO Client Object +-------------------------------- The MinIO client requires the following parameters to connect to an Amazon S3 compatible object storage: | Parameter | Description | -| ----------------- | ---------------------------------------------------------- | +|-------------------|------------------------------------------------------------| | `endpoint` | URL to object storage service. | | `_minio.Options_` | All the options such as credentials, custom transport etc. | @@ -53,83 +55,82 @@ func main() { } ``` -## Example - File Uploader +Example - File Uploader +----------------------- -This sample code connects to an object storage server, creates a bucket, and uploads a file to the bucket. -It uses the MinIO `play` server, a public MinIO cluster located at [https://play.min.io](https://play.min.io). +This sample code connects to an object storage server, creates a bucket, and uploads a file to the bucket. It uses the MinIO `play` server, a public MinIO cluster located at [https://play.min.io](https://play.min.io). -The `play` server runs the latest stable version of MinIO and may be used for testing and development. 
-The access credentials shown in this example are open to the public and all data uploaded to `play` should be considered public and non-protected. +The `play` server runs the latest stable version of MinIO and may be used for testing and development. The access credentials shown in this example are open to the public and all data uploaded to `play` should be considered public and non-protected. ### FileUploader.go This example does the following: -- Connects to the MinIO `play` server using the provided credentials. -- Creates a bucket named `testbucket`. -- Uploads a file named `testdata` from `/tmp`. -- Verifies the file was created using `mc ls`. +- Connects to the MinIO `play` server using the provided credentials. +- Creates a bucket named `testbucket`. +- Uploads a file named `testdata` from `/tmp`. +- Verifies the file was created using `mc ls`. -```go -// FileUploader.go MinIO example -package main + ```go + // FileUploader.go MinIO example + package main -import ( - "context" - "log" + import ( + "context" + "log" - "github.com/minio/minio-go/v7" - "github.com/minio/minio-go/v7/pkg/credentials" -) + "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" + ) -func main() { - ctx := context.Background() - endpoint := "play.min.io" - accessKeyID := "Q3AM3UQ867SPQQA43P2F" - secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG" - useSSL := true + func main() { + ctx := context.Background() + endpoint := "play.min.io" + accessKeyID := "Q3AM3UQ867SPQQA43P2F" + secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG" + useSSL := true - // Initialize minio client object. - minioClient, err := minio.New(endpoint, &minio.Options{ - Creds: credentials.NewStaticV4(accessKeyID, secretAccessKey, ""), - Secure: useSSL, - }) - if err != nil { - log.Fatalln(err) - } - - // Make a new bucket called testbucket. 
- bucketName := "testbucket" - location := "us-east-1" - - err = minioClient.MakeBucket(ctx, bucketName, minio.MakeBucketOptions{Region: location}) - if err != nil { - // Check to see if we already own this bucket (which happens if you run this twice) - exists, errBucketExists := minioClient.BucketExists(ctx, bucketName) - if errBucketExists == nil && exists { - log.Printf("We already own %s\n", bucketName) - } else { + // Initialize minio client object. + minioClient, err := minio.New(endpoint, &minio.Options{ + Creds: credentials.NewStaticV4(accessKeyID, secretAccessKey, ""), + Secure: useSSL, + }) + if err != nil { log.Fatalln(err) } - } else { - log.Printf("Successfully created %s\n", bucketName) + + // Make a new bucket called testbucket. + bucketName := "testbucket" + location := "us-east-1" + + err = minioClient.MakeBucket(ctx, bucketName, minio.MakeBucketOptions{Region: location}) + if err != nil { + // Check to see if we already own this bucket (which happens if you run this twice) + exists, errBucketExists := minioClient.BucketExists(ctx, bucketName) + if errBucketExists == nil && exists { + log.Printf("We already own %s\n", bucketName) + } else { + log.Fatalln(err) + } + } else { + log.Printf("Successfully created %s\n", bucketName) + } + + // Upload the test file + // Change the value of filePath if the file is in another location + objectName := "testdata" + filePath := "/tmp/testdata" + contentType := "application/octet-stream" + + // Upload the test file with FPutObject + info, err := minioClient.FPutObject(ctx, bucketName, objectName, filePath, minio.PutObjectOptions{ContentType: contentType}) + if err != nil { + log.Fatalln(err) + } + + log.Printf("Successfully uploaded %s of size %d\n", objectName, info.Size) } - - // Upload the test file - // Change the value of filePath if the file is in another location - objectName := "testdata" - filePath := "/tmp/testdata" - contentType := "application/octet-stream" - - // Upload the test file with 
FPutObject - info, err := minioClient.FPutObject(ctx, bucketName, objectName, filePath, minio.PutObjectOptions{ContentType: contentType}) - if err != nil { - log.Fatalln(err) - } - - log.Printf("Successfully uploaded %s of size %d\n", objectName, info.Size) -} -``` + ``` **1. Create a test file containing data:** @@ -168,145 +169,150 @@ mc ls play/testbucket [2023-11-01 14:27:55 UTC] 20KiB STANDARD TestDataFile ``` -## API Reference +API Reference +------------- The full API Reference is available here. -* [Complete API Reference](https://min.io/docs/minio/linux/developers/go/API.html) +- [Complete API Reference](https://min.io/docs/minio/linux/developers/go/API.html) ### API Reference : Bucket Operations -* [`MakeBucket`](https://min.io/docs/minio/linux/developers/go/API.html#MakeBucket) -* [`ListBuckets`](https://min.io/docs/minio/linux/developers/go/API.html#ListBuckets) -* [`BucketExists`](https://min.io/docs/minio/linux/developers/go/API.html#BucketExists) -* [`RemoveBucket`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveBucket) -* [`ListObjects`](https://min.io/docs/minio/linux/developers/go/API.html#ListObjects) -* [`ListIncompleteUploads`](https://min.io/docs/minio/linux/developers/go/API.html#ListIncompleteUploads) +- [`MakeBucket`](https://min.io/docs/minio/linux/developers/go/API.html#MakeBucket) +- [`ListBuckets`](https://min.io/docs/minio/linux/developers/go/API.html#ListBuckets) +- [`BucketExists`](https://min.io/docs/minio/linux/developers/go/API.html#BucketExists) +- [`RemoveBucket`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveBucket) +- [`ListObjects`](https://min.io/docs/minio/linux/developers/go/API.html#ListObjects) +- [`ListIncompleteUploads`](https://min.io/docs/minio/linux/developers/go/API.html#ListIncompleteUploads) ### API Reference : Bucket policy Operations -* [`SetBucketPolicy`](https://min.io/docs/minio/linux/developers/go/API.html#SetBucketPolicy) -* 
[`GetBucketPolicy`](https://min.io/docs/minio/linux/developers/go/API.html#GetBucketPolicy) +- [`SetBucketPolicy`](https://min.io/docs/minio/linux/developers/go/API.html#SetBucketPolicy) +- [`GetBucketPolicy`](https://min.io/docs/minio/linux/developers/go/API.html#GetBucketPolicy) ### API Reference : Bucket notification Operations -* [`SetBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#SetBucketNotification) -* [`GetBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#GetBucketNotification) -* [`RemoveAllBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveAllBucketNotification) -* [`ListenBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#ListenBucketNotification) (MinIO Extension) -* [`ListenNotification`](https://min.io/docs/minio/linux/developers/go/API.html#ListenNotification) (MinIO Extension) +- [`SetBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#SetBucketNotification) +- [`GetBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#GetBucketNotification) +- [`RemoveAllBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveAllBucketNotification) +- [`ListenBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#ListenBucketNotification) (MinIO Extension) +- [`ListenNotification`](https://min.io/docs/minio/linux/developers/go/API.html#ListenNotification) (MinIO Extension) ### API Reference : File Object Operations -* [`FPutObject`](https://min.io/docs/minio/linux/developers/go/API.html#FPutObject) -* [`FGetObject`](https://min.io/docs/minio/linux/developers/go/API.html#FGetObject) +- [`FPutObject`](https://min.io/docs/minio/linux/developers/go/API.html#FPutObject) +- [`FGetObject`](https://min.io/docs/minio/linux/developers/go/API.html#FGetObject) ### API Reference : Object Operations -* 
[`GetObject`](https://min.io/docs/minio/linux/developers/go/API.html#GetObject) -* [`PutObject`](https://min.io/docs/minio/linux/developers/go/API.html#PutObject) -* [`PutObjectStreaming`](https://min.io/docs/minio/linux/developers/go/API.html#PutObjectStreaming) -* [`StatObject`](https://min.io/docs/minio/linux/developers/go/API.html#StatObject) -* [`CopyObject`](https://min.io/docs/minio/linux/developers/go/API.html#CopyObject) -* [`RemoveObject`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveObject) -* [`RemoveObjects`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveObjects) -* [`RemoveIncompleteUpload`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveIncompleteUpload) -* [`SelectObjectContent`](https://min.io/docs/minio/linux/developers/go/API.html#SelectObjectContent) +- [`GetObject`](https://min.io/docs/minio/linux/developers/go/API.html#GetObject) +- [`PutObject`](https://min.io/docs/minio/linux/developers/go/API.html#PutObject) +- [`PutObjectStreaming`](https://min.io/docs/minio/linux/developers/go/API.html#PutObjectStreaming) +- [`StatObject`](https://min.io/docs/minio/linux/developers/go/API.html#StatObject) +- [`CopyObject`](https://min.io/docs/minio/linux/developers/go/API.html#CopyObject) +- [`RemoveObject`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveObject) +- [`RemoveObjects`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveObjects) +- [`RemoveIncompleteUpload`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveIncompleteUpload) +- [`SelectObjectContent`](https://min.io/docs/minio/linux/developers/go/API.html#SelectObjectContent) ### API Reference : Presigned Operations -* [`PresignedGetObject`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedGetObject) -* [`PresignedPutObject`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedPutObject) -* 
[`PresignedHeadObject`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedHeadObject) -* [`PresignedPostPolicy`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedPostPolicy) +- [`PresignedGetObject`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedGetObject) +- [`PresignedPutObject`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedPutObject) +- [`PresignedHeadObject`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedHeadObject) +- [`PresignedPostPolicy`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedPostPolicy) ### API Reference : Client custom settings -* [`SetAppInfo`](https://min.io/docs/minio/linux/developers/go/API.html#SetAppInfo) -* [`TraceOn`](https://min.io/docs/minio/linux/developers/go/API.html#TraceOn) -* [`TraceOff`](https://min.io/docs/minio/linux/developers/go/API.html#TraceOff) +- [`SetAppInfo`](https://min.io/docs/minio/linux/developers/go/API.html#SetAppInfo) +- [`TraceOn`](https://min.io/docs/minio/linux/developers/go/API.html#TraceOn) +- [`TraceOff`](https://min.io/docs/minio/linux/developers/go/API.html#TraceOff) -## Full Examples +Full Examples +------------- ### Full Examples : Bucket Operations -* [makebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/makebucket.go) -* [listbuckets.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbuckets.go) -* [bucketexists.go](https://github.com/minio/minio-go/blob/master/examples/s3/bucketexists.go) -* [removebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/removebucket.go) -* [listobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjects.go) -* [listobjectsV2.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjectsV2.go) -* [listincompleteuploads.go](https://github.com/minio/minio-go/blob/master/examples/s3/listincompleteuploads.go) +- 
[makebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/makebucket.go) +- [listbuckets.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbuckets.go) +- [bucketexists.go](https://github.com/minio/minio-go/blob/master/examples/s3/bucketexists.go) +- [removebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/removebucket.go) +- [listobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjects.go) +- [listobjectsV2.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjectsV2.go) +- [listincompleteuploads.go](https://github.com/minio/minio-go/blob/master/examples/s3/listincompleteuploads.go) ### Full Examples : Bucket policy Operations -* [setbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketpolicy.go) -* [getbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketpolicy.go) -* [listbucketpolicies.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbucketpolicies.go) +- [setbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketpolicy.go) +- [getbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketpolicy.go) +- [listbucketpolicies.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbucketpolicies.go) ### Full Examples : Bucket lifecycle Operations -* [setbucketlifecycle.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketlifecycle.go) -* [getbucketlifecycle.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketlifecycle.go) +- [setbucketlifecycle.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketlifecycle.go) +- [getbucketlifecycle.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketlifecycle.go) ### Full Examples : Bucket encryption Operations -* [setbucketencryption.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketencryption.go) -* 
[getbucketencryption.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketencryption.go) -* [removebucketencryption.go](https://github.com/minio/minio-go/blob/master/examples/s3/removebucketencryption.go) +- [setbucketencryption.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketencryption.go) +- [getbucketencryption.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketencryption.go) +- [removebucketencryption.go](https://github.com/minio/minio-go/blob/master/examples/s3/removebucketencryption.go) ### Full Examples : Bucket replication Operations -* [setbucketreplication.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketreplication.go) -* [getbucketreplication.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketreplication.go) -* [removebucketreplication.go](https://github.com/minio/minio-go/blob/master/examples/s3/removebucketreplication.go) +- [setbucketreplication.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketreplication.go) +- [getbucketreplication.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketreplication.go) +- [removebucketreplication.go](https://github.com/minio/minio-go/blob/master/examples/s3/removebucketreplication.go) ### Full Examples : Bucket notification Operations -* [setbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketnotification.go) -* [getbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketnotification.go) -* [removeallbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeallbucketnotification.go) -* [listenbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/minio/listenbucketnotification.go) (MinIO Extension) -* [listennotification.go](https://github.com/minio/minio-go/blob/master/examples/minio/listen-notification.go) (MinIO Extension) +- 
[setbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketnotification.go) +- [getbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketnotification.go) +- [removeallbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeallbucketnotification.go) +- [listenbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/minio/listenbucketnotification.go) (MinIO Extension) +- [listennotification.go](https://github.com/minio/minio-go/blob/master/examples/minio/listen-notification.go) (MinIO Extension) ### Full Examples : File Object Operations -* [fputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject.go) -* [fgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fgetobject.go) +- [fputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject.go) +- [fgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fgetobject.go) ### Full Examples : Object Operations -* [putobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/putobject.go) -* [getobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/getobject.go) -* [statobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/statobject.go) -* [copyobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/copyobject.go) -* [removeobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobject.go) -* [removeincompleteupload.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeincompleteupload.go) -* [removeobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobjects.go) +- [putobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/putobject.go) +- [getobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/getobject.go) +- 
[statobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/statobject.go) +- [copyobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/copyobject.go) +- [removeobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobject.go) +- [removeincompleteupload.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeincompleteupload.go) +- [removeobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobjects.go) ### Full Examples : Encrypted Object Operations -* [put-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/put-encrypted-object.go) -* [get-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/get-encrypted-object.go) -* [fput-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputencrypted-object.go) +- [put-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/put-encrypted-object.go) +- [get-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/get-encrypted-object.go) +- [fput-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputencrypted-object.go) ### Full Examples : Presigned Operations -* [presignedgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedgetobject.go) -* [presignedputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedputobject.go) -* [presignedheadobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedheadobject.go) -* [presignedpostpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedpostpolicy.go) +- [presignedgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedgetobject.go) +- [presignedputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedputobject.go) +- 
[presignedheadobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedheadobject.go) +- [presignedpostpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedpostpolicy.go) -## Explore Further +Explore Further +--------------- -* [Godoc Documentation](https://pkg.go.dev/github.com/minio/minio-go/v7) -* [Complete Documentation](https://min.io/docs/minio/kubernetes/upstream/index.html) -* [MinIO Go Client SDK API Reference](https://min.io/docs/minio/linux/developers/go/API.html) +- [Godoc Documentation](https://pkg.go.dev/github.com/minio/minio-go/v7) +- [Complete Documentation](https://min.io/docs/minio/kubernetes/upstream/index.html) +- [MinIO Go Client SDK API Reference](https://min.io/docs/minio/linux/developers/go/API.html) -## Contribute +Contribute +---------- [Contributors Guide](https://github.com/minio/minio-go/blob/master/CONTRIBUTING.md) -## License +License +------- This SDK is distributed under the [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0), see [LICENSE](https://github.com/minio/minio-go/blob/master/LICENSE) and [NOTICE](https://github.com/minio/minio-go/blob/master/NOTICE) for more information. diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-cors.go b/vendor/github.com/minio/minio-go/v7/api-bucket-cors.go index 9d514947dd1..9967fe39e63 100644 --- a/vendor/github.com/minio/minio-go/v7/api-bucket-cors.go +++ b/vendor/github.com/minio/minio-go/v7/api-bucket-cors.go @@ -26,7 +26,15 @@ import ( "github.com/minio/minio-go/v7/pkg/s3utils" ) -// SetBucketCors sets the cors configuration for the bucket +// SetBucketCors sets the Cross-Origin Resource Sharing (CORS) configuration for the bucket. +// If corsConfig is nil, the existing CORS configuration will be removed. 
+// +// Parameters: +// - ctx: Context for request cancellation and timeout +// - bucketName: Name of the bucket +// - corsConfig: CORS configuration to apply (nil to remove existing configuration) +// +// Returns an error if the operation fails. func (c *Client) SetBucketCors(ctx context.Context, bucketName string, corsConfig *cors.Config) error { if err := s3utils.CheckValidBucketName(bucketName); err != nil { return err @@ -90,7 +98,14 @@ func (c *Client) removeBucketCors(ctx context.Context, bucketName string) error return nil } -// GetBucketCors returns the current cors +// GetBucketCors retrieves the Cross-Origin Resource Sharing (CORS) configuration from the bucket. +// If no CORS configuration exists, returns nil with no error. +// +// Parameters: +// - ctx: Context for request cancellation and timeout +// - bucketName: Name of the bucket +// +// Returns the CORS configuration or an error if the operation fails. func (c *Client) GetBucketCors(ctx context.Context, bucketName string) (*cors.Config, error) { if err := s3utils.CheckValidBucketName(bucketName); err != nil { return nil, err diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-encryption.go b/vendor/github.com/minio/minio-go/v7/api-bucket-encryption.go index 24f94e03440..3ae9fe2792b 100644 --- a/vendor/github.com/minio/minio-go/v7/api-bucket-encryption.go +++ b/vendor/github.com/minio/minio-go/v7/api-bucket-encryption.go @@ -28,6 +28,14 @@ import ( ) // SetBucketEncryption sets the default encryption configuration on an existing bucket. +// The encryption configuration specifies the default encryption behavior for objects uploaded to the bucket. +// +// Parameters: +// - ctx: Context for request cancellation and timeout +// - bucketName: Name of the bucket +// - config: Server-side encryption configuration to apply +// +// Returns an error if the operation fails or if config is nil. 
func (c *Client) SetBucketEncryption(ctx context.Context, bucketName string, config *sse.Configuration) error { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { @@ -69,7 +77,15 @@ func (c *Client) SetBucketEncryption(ctx context.Context, bucketName string, con return nil } -// RemoveBucketEncryption removes the default encryption configuration on a bucket with a context to control cancellations and timeouts. +// RemoveBucketEncryption removes the default encryption configuration from a bucket. +// After removal, the bucket will no longer apply default encryption to new objects. +// It uses the provided context to control cancellations and timeouts. +// +// Parameters: +// - ctx: Context for request cancellation and timeout +// - bucketName: Name of the bucket +// +// Returns an error if the operation fails. func (c *Client) RemoveBucketEncryption(ctx context.Context, bucketName string) error { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { @@ -97,8 +113,14 @@ func (c *Client) RemoveBucketEncryption(ctx context.Context, bucketName string) return nil } -// GetBucketEncryption gets the default encryption configuration -// on an existing bucket with a context to control cancellations and timeouts. +// GetBucketEncryption retrieves the default encryption configuration from a bucket. +// It uses the provided context to control cancellations and timeouts. +// +// Parameters: +// - ctx: Context for request cancellation and timeout +// - bucketName: Name of the bucket +// +// Returns the bucket's encryption configuration or an error if the operation fails. func (c *Client) GetBucketEncryption(ctx context.Context, bucketName string) (*sse.Configuration, error) { // Input validation. 
if err := s3utils.CheckValidBucketName(bucketName); err != nil { diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-notification.go b/vendor/github.com/minio/minio-go/v7/api-bucket-notification.go index 0d601104226..9e2a6776918 100644 --- a/vendor/github.com/minio/minio-go/v7/api-bucket-notification.go +++ b/vendor/github.com/minio/minio-go/v7/api-bucket-notification.go @@ -21,12 +21,12 @@ import ( "bufio" "bytes" "context" + "encoding/json" "encoding/xml" "net/http" "net/url" "time" - "github.com/minio/minio-go/v7/internal/json" "github.com/minio/minio-go/v7/pkg/notification" "github.com/minio/minio-go/v7/pkg/s3utils" ) diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-policy.go b/vendor/github.com/minio/minio-go/v7/api-bucket-policy.go index 3a168c13eee..0e561bdfab2 100644 --- a/vendor/github.com/minio/minio-go/v7/api-bucket-policy.go +++ b/vendor/github.com/minio/minio-go/v7/api-bucket-policy.go @@ -26,7 +26,16 @@ import ( "github.com/minio/minio-go/v7/pkg/s3utils" ) -// SetBucketPolicy sets the access permissions on an existing bucket. +// SetBucketPolicy sets the access permissions policy on an existing bucket. +// The policy should be a valid JSON string that conforms to the IAM policy format. +// If policy is an empty string, the existing bucket policy will be removed. +// +// Parameters: +// - ctx: Context for request cancellation and timeout +// - bucketName: Name of the bucket +// - policy: JSON policy string (empty string to remove existing policy) +// +// Returns an error if the operation fails. func (c *Client) SetBucketPolicy(ctx context.Context, bucketName, policy string) error { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { @@ -95,7 +104,14 @@ func (c *Client) removeBucketPolicy(ctx context.Context, bucketName string) erro return nil } -// GetBucketPolicy returns the current policy +// GetBucketPolicy retrieves the access permissions policy for the bucket. 
+// If no bucket policy exists, returns an empty string with no error. +// +// Parameters: +// - ctx: Context for request cancellation and timeout +// - bucketName: Name of the bucket +// +// Returns the policy as a JSON string or an error if the operation fails. func (c *Client) GetBucketPolicy(ctx context.Context, bucketName string) (string, error) { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-qos.go b/vendor/github.com/minio/minio-go/v7/api-bucket-qos.go new file mode 100644 index 00000000000..d1493a5b9a4 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-bucket-qos.go @@ -0,0 +1,212 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2025 MinIO, Inc. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "context" + "encoding/json" + "io" + "net/http" + "net/url" + "strings" + + "github.com/minio/minio-go/v7/pkg/s3utils" + "gopkg.in/yaml.v3" +) + +// QOSConfigVersionCurrent is the current version of the QoS configuration. +const QOSConfigVersionCurrent = "v1" + +// QOSConfig represents the QoS configuration for a bucket. +type QOSConfig struct { + Version string `yaml:"version"` + Rules []QOSRule `yaml:"rules"` +} + +// QOSRule represents a single QoS rule. 
+type QOSRule struct { + ID string `yaml:"id"` + Label string `yaml:"label,omitempty"` + Priority int `yaml:"priority"` + ObjectPrefix string `yaml:"objectPrefix"` + API string `yaml:"api"` + Rate int64 `yaml:"rate"` + Burst int64 `yaml:"burst"` // not required for concurrency limit + Limit string `yaml:"limit"` // "concurrency" or "rps" +} + +// NewQOSConfig creates a new empty QoS configuration. +func NewQOSConfig() *QOSConfig { + return &QOSConfig{ + Version: "v1", + Rules: []QOSRule{}, + } +} + +// GetBucketQOS retrieves the Quality of Service (QoS) configuration for the bucket. +// +// Parameters: +// - ctx: Context for request cancellation and timeout +// - bucket: Name of the bucket +// +// Returns the QoS configuration or an error if the operation fails. +func (c *Client) GetBucketQOS(ctx context.Context, bucket string) (*QOSConfig, error) { + var qosCfg QOSConfig + // Input validation. + if err := s3utils.CheckValidBucketName(bucket); err != nil { + return nil, err + } + urlValues := make(url.Values) + urlValues.Set("qos", "") + // Execute GET on bucket to fetch qos. + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucket, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + }) + defer closeResponse(resp) + if err != nil { + return nil, err + } + + if resp.StatusCode != http.StatusOK { + return nil, httpRespToErrorResponse(resp, bucket, "") + } + b, err := io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + if err = yaml.Unmarshal(b, &qosCfg); err != nil { + return nil, err + } + + return &qosCfg, nil +} + +// SetBucketQOS sets the Quality of Service (QoS) configuration for a bucket. +// +// Parameters: +// - ctx: Context for request cancellation and timeout +// - bucket: Name of the bucket +// - qosCfg: QoS configuration to apply +// +// Returns an error if the operation fails. +func (c *Client) SetBucketQOS(ctx context.Context, bucket string, qosCfg *QOSConfig) error { + // Input validation. 
+ if err := s3utils.CheckValidBucketName(bucket); err != nil { + return err + } + + data, err := yaml.Marshal(qosCfg) + if err != nil { + return err + } + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("qos", "") + + reqMetadata := requestMetadata{ + bucketName: bucket, + queryValues: urlValues, + contentBody: strings.NewReader(string(data)), + contentLength: int64(len(data)), + } + + // Execute PUT to upload a new bucket QoS configuration. + resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) + defer closeResponse(resp) + if err != nil { + return err + } + if resp != nil { + if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK { + return httpRespToErrorResponse(resp, bucket, "") + } + } + return nil +} + +// CounterMetric returns stats for a counter +type CounterMetric struct { + Last1m uint64 `json:"last1m"` + Last1hr uint64 `json:"last1hr"` + Total uint64 `json:"total"` +} + +// QOSMetric - metric for a qos rule per bucket +type QOSMetric struct { + APIName string `json:"apiName"` + Rule QOSRule `json:"rule"` + Totals CounterMetric `json:"totals"` + Throttled CounterMetric `json:"throttleCount"` + ExceededRateLimit CounterMetric `json:"exceededRateLimitCount"` + ClientDisconnCount CounterMetric `json:"clientDisconnectCount"` + ReqTimeoutCount CounterMetric `json:"reqTimeoutCount"` +} + +// QOSNodeStats represents stats for a bucket on a single node +type QOSNodeStats struct { + Stats []QOSMetric `json:"stats"` + NodeName string `json:"node"` +} + +// GetBucketQOSMetrics retrieves Quality of Service (QoS) metrics for a bucket. +// +// Parameters: +// - ctx: Context for request cancellation and timeout +// - bucketName: Name of the bucket +// - nodeName: Name of the node (empty string for all nodes) +// +// Returns QoS metrics per node or an error if the operation fails. 
+func (c *Client) GetBucketQOSMetrics(ctx context.Context, bucketName, nodeName string) (qs []QOSNodeStats, err error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return qs, err + } + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("qos-metrics", "") + if nodeName != "" { + urlValues.Set("node", nodeName) + } + // Execute GET on bucket to get qos metrics. + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + }) + + defer closeResponse(resp) + if err != nil { + return qs, err + } + + if resp.StatusCode != http.StatusOK { + return qs, httpRespToErrorResponse(resp, bucketName, "") + } + respBytes, err := io.ReadAll(resp.Body) + if err != nil { + return qs, err + } + + if err := json.Unmarshal(respBytes, &qs); err != nil { + return qs, err + } + return qs, nil +} diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-replication.go b/vendor/github.com/minio/minio-go/v7/api-bucket-replication.go index 8632bb85db4..6dd7ae8934c 100644 --- a/vendor/github.com/minio/minio-go/v7/api-bucket-replication.go +++ b/vendor/github.com/minio/minio-go/v7/api-bucket-replication.go @@ -20,6 +20,7 @@ package minio import ( "bytes" "context" + "encoding/json" "encoding/xml" "io" "net/http" @@ -27,17 +28,30 @@ import ( "time" "github.com/google/uuid" - "github.com/minio/minio-go/v7/internal/json" "github.com/minio/minio-go/v7/pkg/replication" "github.com/minio/minio-go/v7/pkg/s3utils" ) -// RemoveBucketReplication removes a replication config on an existing bucket. +// RemoveBucketReplication removes the replication configuration from an existing bucket. +// +// Parameters: +// - ctx: Context for request cancellation and timeout +// - bucketName: Name of the bucket +// +// Returns an error if the operation fails. 
func (c *Client) RemoveBucketReplication(ctx context.Context, bucketName string) error { return c.removeBucketReplication(ctx, bucketName) } -// SetBucketReplication sets a replication config on an existing bucket. +// SetBucketReplication sets the replication configuration on an existing bucket. +// If the provided configuration is empty, this method removes the existing replication configuration. +// +// Parameters: +// - ctx: Context for request cancellation and timeout +// - bucketName: Name of the bucket +// - cfg: Replication configuration to apply +// +// Returns an error if the operation fails. func (c *Client) SetBucketReplication(ctx context.Context, bucketName string, cfg replication.Config) error { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { @@ -108,8 +122,14 @@ func (c *Client) removeBucketReplication(ctx context.Context, bucketName string) return nil } -// GetBucketReplication fetches bucket replication configuration.If config is not -// found, returns empty config with nil error. +// GetBucketReplication retrieves the bucket replication configuration. +// If no replication configuration is found, returns an empty config with nil error. +// +// Parameters: +// - ctx: Context for request cancellation and timeout +// - bucketName: Name of the bucket +// +// Returns the replication configuration or an error if the operation fails. func (c *Client) GetBucketReplication(ctx context.Context, bucketName string) (cfg replication.Config, err error) { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { @@ -155,7 +175,13 @@ func (c *Client) getBucketReplication(ctx context.Context, bucketName string) (c return cfg, nil } -// GetBucketReplicationMetrics fetches bucket replication status metrics +// GetBucketReplicationMetrics retrieves bucket replication status metrics. 
+// +// Parameters: +// - ctx: Context for request cancellation and timeout +// - bucketName: Name of the bucket +// +// Returns the replication metrics or an error if the operation fails. func (c *Client) GetBucketReplicationMetrics(ctx context.Context, bucketName string) (s replication.Metrics, err error) { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { @@ -200,8 +226,15 @@ func mustGetUUID() string { return u.String() } -// ResetBucketReplication kicks off replication of previously replicated objects if ExistingObjectReplication -// is enabled in the replication config +// ResetBucketReplication initiates replication of previously replicated objects. +// This requires ExistingObjectReplication to be enabled in the replication configuration. +// +// Parameters: +// - ctx: Context for request cancellation and timeout +// - bucketName: Name of the bucket +// - olderThan: Only replicate objects older than this duration (0 for all objects) +// +// Returns a reset ID that can be used to track the operation, or an error if the operation fails. func (c *Client) ResetBucketReplication(ctx context.Context, bucketName string, olderThan time.Duration) (rID string, err error) { rID = mustGetUUID() _, err = c.resetBucketReplicationOnTarget(ctx, bucketName, olderThan, "", rID) @@ -211,8 +244,16 @@ func (c *Client) ResetBucketReplication(ctx context.Context, bucketName string, return rID, nil } -// ResetBucketReplicationOnTarget kicks off replication of previously replicated objects if -// ExistingObjectReplication is enabled in the replication config +// ResetBucketReplicationOnTarget initiates replication of previously replicated objects to a specific target. +// This requires ExistingObjectReplication to be enabled in the replication configuration. 
+// +// Parameters: +// - ctx: Context for request cancellation and timeout +// - bucketName: Name of the bucket +// - olderThan: Only replicate objects older than this duration (0 for all objects) +// - tgtArn: ARN of the target to reset replication for +// +// Returns resync target information or an error if the operation fails. func (c *Client) ResetBucketReplicationOnTarget(ctx context.Context, bucketName string, olderThan time.Duration, tgtArn string) (replication.ResyncTargetsInfo, error) { return c.resetBucketReplicationOnTarget(ctx, bucketName, olderThan, tgtArn, mustGetUUID()) } @@ -222,7 +263,7 @@ func (c *Client) ResetBucketReplicationOnTarget(ctx context.Context, bucketName func (c *Client) resetBucketReplicationOnTarget(ctx context.Context, bucketName string, olderThan time.Duration, tgtArn, resetID string) (rinfo replication.ResyncTargetsInfo, err error) { // Input validation. if err = s3utils.CheckValidBucketName(bucketName); err != nil { - return + return rinfo, err } // Get resources properly escaped and lined up before // using them in http request. @@ -256,7 +297,14 @@ func (c *Client) resetBucketReplicationOnTarget(ctx context.Context, bucketName return rinfo, nil } -// GetBucketReplicationResyncStatus gets the status of replication resync +// GetBucketReplicationResyncStatus retrieves the status of a replication resync operation. +// +// Parameters: +// - ctx: Context for request cancellation and timeout +// - bucketName: Name of the bucket +// - arn: ARN of the replication target (empty string for all targets) +// +// Returns resync status information or an error if the operation fails. func (c *Client) GetBucketReplicationResyncStatus(ctx context.Context, bucketName, arn string) (rinfo replication.ResyncTargetsInfo, err error) { // Input validation. 
if err := s3utils.CheckValidBucketName(bucketName); err != nil { @@ -290,11 +338,18 @@ func (c *Client) GetBucketReplicationResyncStatus(ctx context.Context, bucketNam return rinfo, nil } -// CancelBucketReplicationResync cancels in progress replication resync +// CancelBucketReplicationResync cancels an in-progress replication resync operation. +// +// Parameters: +// - ctx: Context for request cancellation and timeout +// - bucketName: Name of the bucket +// - tgtArn: ARN of the replication target (empty string for all targets) +// +// Returns the ID of the canceled resync operation or an error if the operation fails. func (c *Client) CancelBucketReplicationResync(ctx context.Context, bucketName string, tgtArn string) (id string, err error) { // Input validation. if err = s3utils.CheckValidBucketName(bucketName); err != nil { - return + return id, err } // Get resources properly escaped and lined up before // using them in http request. @@ -326,7 +381,13 @@ func (c *Client) CancelBucketReplicationResync(ctx context.Context, bucketName s return id, nil } -// GetBucketReplicationMetricsV2 fetches bucket replication status metrics +// GetBucketReplicationMetricsV2 retrieves bucket replication status metrics using the V2 API. +// +// Parameters: +// - ctx: Context for request cancellation and timeout +// - bucketName: Name of the bucket +// +// Returns the V2 replication metrics or an error if the operation fails. func (c *Client) GetBucketReplicationMetricsV2(ctx context.Context, bucketName string) (s replication.MetricsV2, err error) { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { @@ -362,7 +423,13 @@ func (c *Client) GetBucketReplicationMetricsV2(ctx context.Context, bucketName s return s, nil } -// CheckBucketReplication validates if replication is set up properly for a bucket +// CheckBucketReplication validates whether replication is properly configured for a bucket. 
+// +// Parameters: +// - ctx: Context for request cancellation and timeout +// - bucketName: Name of the bucket +// +// Returns nil if replication is valid, or an error describing the validation failure. func (c *Client) CheckBucketReplication(ctx context.Context, bucketName string) (err error) { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-tagging.go b/vendor/github.com/minio/minio-go/v7/api-bucket-tagging.go index 86d74298a6a..921f90f999b 100644 --- a/vendor/github.com/minio/minio-go/v7/api-bucket-tagging.go +++ b/vendor/github.com/minio/minio-go/v7/api-bucket-tagging.go @@ -29,8 +29,14 @@ import ( "github.com/minio/minio-go/v7/pkg/tags" ) -// GetBucketTagging fetch tagging configuration for a bucket with a -// context to control cancellations and timeouts. +// GetBucketTagging fetches the tagging configuration for a bucket. +// It uses the provided context to control cancellations and timeouts. +// +// Parameters: +// - ctx: Context for request cancellation and timeout +// - bucketName: Name of the bucket +// +// Returns the bucket's tags or an error if the operation fails. func (c *Client) GetBucketTagging(ctx context.Context, bucketName string) (*tags.Tags, error) { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { @@ -61,8 +67,15 @@ func (c *Client) GetBucketTagging(ctx context.Context, bucketName string) (*tags return tags.ParseBucketXML(resp.Body) } -// SetBucketTagging sets tagging configuration for a bucket -// with a context to control cancellations and timeouts. +// SetBucketTagging sets the tagging configuration for a bucket. +// It uses the provided context to control cancellations and timeouts. +// +// Parameters: +// - ctx: Context for request cancellation and timeout +// - bucketName: Name of the bucket +// - tags: Tag set to apply to the bucket +// +// Returns an error if the operation fails or if tags is nil. 
func (c *Client) SetBucketTagging(ctx context.Context, bucketName string, tags *tags.Tags) error { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { @@ -104,8 +117,14 @@ func (c *Client) SetBucketTagging(ctx context.Context, bucketName string, tags * return nil } -// RemoveBucketTagging removes tagging configuration for a -// bucket with a context to control cancellations and timeouts. +// RemoveBucketTagging removes the tagging configuration from a bucket. +// It uses the provided context to control cancellations and timeouts. +// +// Parameters: +// - ctx: Context for request cancellation and timeout +// - bucketName: Name of the bucket +// +// Returns an error if the operation fails. func (c *Client) RemoveBucketTagging(ctx context.Context, bucketName string) error { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { diff --git a/vendor/github.com/minio/minio-go/v7/api-compose-object.go b/vendor/github.com/minio/minio-go/v7/api-compose-object.go index 154af7121a4..232bd2c01d0 100644 --- a/vendor/github.com/minio/minio-go/v7/api-compose-object.go +++ b/vendor/github.com/minio/minio-go/v7/api-compose-object.go @@ -42,13 +42,15 @@ type CopyDestOptions struct { // provided key. If it is nil, no encryption is performed. Encryption encrypt.ServerSide + ChecksumType ChecksumType + // `userMeta` is the user-metadata key-value pairs to be set on the // destination. The keys are automatically prefixed with `x-amz-meta-` // if needed. If nil is passed, and if only a single source (of any // size) is provided in the ComposeObject call, then metadata from the // source is copied to the destination. 
// if no user-metadata is provided, it is copied from source - // (when there is only once source object in the compose + // (when there is only one source object in the compose // request) UserMetadata map[string]string // UserMetadata is only set to destination if ReplaceMetadata is true @@ -140,6 +142,9 @@ func (opts CopyDestOptions) Marshal(header http.Header) { if !opts.Expires.IsZero() { header.Set("Expires", opts.Expires.UTC().Format(http.TimeFormat)) } + if opts.ChecksumType.IsSet() { + header.Set(amzChecksumAlgo, opts.ChecksumType.String()) + } if opts.ReplaceMetadata { header.Set("x-amz-metadata-directive", replaceDirective) @@ -345,7 +350,7 @@ func (c *Client) copyObjectPartDo(ctx context.Context, srcBucket, srcObject, des }) defer closeResponse(resp) if err != nil { - return + return p, err } // Check if we got an error response. @@ -580,7 +585,7 @@ func partsRequired(size int64) int64 { // it is not the last part. func calculateEvenSplits(size int64, src CopySrcOptions) (startIndex, endIndex []int64) { if size == 0 { - return + return startIndex, endIndex } reqParts := partsRequired(size) @@ -617,5 +622,5 @@ func calculateEvenSplits(size int64, src CopySrcOptions) (startIndex, endIndex [ startIndex[j], endIndex[j] = cStart, cEnd } - return + return startIndex, endIndex } diff --git a/vendor/github.com/minio/minio-go/v7/api-error-response.go b/vendor/github.com/minio/minio-go/v7/api-error-response.go index e85aa322ca4..e5f88d98e19 100644 --- a/vendor/github.com/minio/minio-go/v7/api-error-response.go +++ b/vendor/github.com/minio/minio-go/v7/api-error-response.go @@ -20,6 +20,7 @@ package minio import ( "bytes" "encoding/xml" + "errors" "fmt" "io" "net/http" @@ -128,9 +129,18 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string) Server: resp.Header.Get("Server"), } + _, success := successStatus[resp.StatusCode] + errBody, err := xmlDecodeAndBody(resp.Body, &errResp) // Xml decoding failed with no body, fall back to HTTP 
headers. if err != nil { + var unmarshalErr xml.UnmarshalError + if success && errors.As(err, &unmarshalErr) { + // This is a successful message so not an error response + // return nil, + return nil + } + switch resp.StatusCode { case http.StatusNotFound: if objectName == "" { diff --git a/vendor/github.com/minio/minio-go/v7/api-get-object-attributes.go b/vendor/github.com/minio/minio-go/v7/api-get-object-attributes.go index e1155c37239..d2e8cabded9 100644 --- a/vendor/github.com/minio/minio-go/v7/api-get-object-attributes.go +++ b/vendor/github.com/minio/minio-go/v7/api-get-object-attributes.go @@ -127,7 +127,7 @@ func (o *ObjectAttributes) parseResponse(resp *http.Response) (err error) { } o.ObjectAttributesResponse = *response - return + return err } // GetObjectAttributes API combines HeadObject and ListParts. diff --git a/vendor/github.com/minio/minio-go/v7/api-get-object-file.go b/vendor/github.com/minio/minio-go/v7/api-get-object-file.go index 567a42e450c..6ef9c9330ee 100644 --- a/vendor/github.com/minio/minio-go/v7/api-get-object-file.go +++ b/vendor/github.com/minio/minio-go/v7/api-get-object-file.go @@ -69,7 +69,7 @@ func (c *Client) FGetObject(ctx context.Context, bucketName, objectName, filePat } // Write to a temporary file "fileName.part.minio" before saving. - filePartPath := filePath + sum256Hex([]byte(objectStat.ETag)) + ".part.minio" + filePartPath := filepath.Join(filepath.Dir(filePath), sum256Hex([]byte(filepath.Base(filePath)+objectStat.ETag))+".part.minio") // If exists, open in append mode. If not create it as a part file. 
filePart, err := os.OpenFile(filePartPath, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0o600) diff --git a/vendor/github.com/minio/minio-go/v7/api-inventory-ext.go b/vendor/github.com/minio/minio-go/v7/api-inventory-ext.go new file mode 100644 index 00000000000..498300785fc --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-inventory-ext.go @@ -0,0 +1,332 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2025 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "context" + "encoding/json" + "io" + "iter" + "net/http" + "net/url" + "strings" + "time" + + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +// This file contains the inventory API extension for MinIO server. It is not +// compatible with AWS S3. + +func makeInventoryReqMetadata(bucket string, urlParams ...string) requestMetadata { + urlValues := make(url.Values) + urlValues.Set("minio-inventory", "") + + // If an odd number of parameters is given, we skip the last pair to avoid + // an out of bounds access. + for i := 0; i+1 < len(urlParams); i += 2 { + urlValues.Set(urlParams[i], urlParams[i+1]) + } + + return requestMetadata{ + bucketName: bucket, + queryValues: urlValues, + } +} + +// GenerateInventoryConfigYAML generates a YAML template for an inventory configuration. +// This is a MinIO-specific API and is not compatible with AWS S3. 
+// +// Parameters: +// - ctx: Context for request cancellation and timeout +// - bucket: Name of the bucket +// - id: Unique identifier for the inventory configuration +// +// Returns a YAML template string that can be customized and used with PutBucketInventoryConfiguration. +func (c *Client) GenerateInventoryConfigYAML(ctx context.Context, bucket, id string) (string, error) { + if err := s3utils.CheckValidBucketName(bucket); err != nil { + return "", err + } + if id == "" { + return "", errInvalidArgument("inventory ID cannot be empty") + } + reqMeta := makeInventoryReqMetadata(bucket, "generate", "", "id", id) + resp, err := c.executeMethod(ctx, http.MethodGet, reqMeta) + defer closeResponse(resp) + if err != nil { + return "", err + } + if resp.StatusCode != http.StatusOK { + return "", httpRespToErrorResponse(resp, bucket, "") + } + buf := new(strings.Builder) + _, err = io.Copy(buf, resp.Body) + return buf.String(), err +} + +// inventoryPutConfigOpts is a placeholder for future options that may be added. +type inventoryPutConfigOpts struct{} + +// InventoryPutConfigOption is to allow for functional options for +// PutBucketInventoryConfiguration. It may be used in the future to customize +// the PutBucketInventoryConfiguration request, but currently does not do +// anything. +type InventoryPutConfigOption func(*inventoryPutConfigOpts) + +// PutBucketInventoryConfiguration creates or updates an inventory configuration for a bucket. +// This is a MinIO-specific API and is not compatible with AWS S3. +// +// Parameters: +// - ctx: Context for request cancellation and timeout +// - bucket: Name of the bucket +// - id: Unique identifier for the inventory configuration +// - yamlDef: YAML definition of the inventory configuration +// +// Returns an error if the operation fails, or if bucket name, id, or yamlDef is empty. 
+func (c *Client) PutBucketInventoryConfiguration(ctx context.Context, bucket string, id string, yamlDef string, _ ...InventoryPutConfigOption) error { + if err := s3utils.CheckValidBucketName(bucket); err != nil { + return err + } + if id == "" { + return errInvalidArgument("inventory ID cannot be empty") + } + if yamlDef == "" { + return errInvalidArgument("YAML definition cannot be empty") + } + reqMeta := makeInventoryReqMetadata(bucket, "id", id) + reqMeta.contentBody = strings.NewReader(yamlDef) + reqMeta.contentLength = int64(len(yamlDef)) + reqMeta.contentMD5Base64 = sumMD5Base64([]byte(yamlDef)) + + resp, err := c.executeMethod(ctx, http.MethodPut, reqMeta) + defer closeResponse(resp) + if err != nil { + return err + } + if resp.StatusCode != http.StatusOK { + return httpRespToErrorResponse(resp, bucket, "") + } + return nil +} + +// GetBucketInventoryConfiguration retrieves the inventory configuration for a bucket. +// This is a MinIO-specific API and is not compatible with AWS S3. +// +// Parameters: +// - ctx: Context for request cancellation and timeout +// - bucket: Name of the bucket +// - id: Unique identifier for the inventory configuration +// +// Returns the inventory configuration or an error if the operation fails or if the configuration doesn't exist. 
+func (c *Client) GetBucketInventoryConfiguration(ctx context.Context, bucket, id string) (*InventoryConfiguration, error) { + if err := s3utils.CheckValidBucketName(bucket); err != nil { + return nil, err + } + if id == "" { + return nil, errInvalidArgument("inventory ID cannot be empty") + } + reqMeta := makeInventoryReqMetadata(bucket, "id", id) + resp, err := c.executeMethod(ctx, http.MethodGet, reqMeta) + defer closeResponse(resp) + if err != nil { + return nil, err + } + if resp.StatusCode != http.StatusOK { + return nil, httpRespToErrorResponse(resp, bucket, "") + } + decoder := json.NewDecoder(resp.Body) + var ic InventoryConfiguration + err = decoder.Decode(&ic) + if err != nil { + return nil, err + } + return &ic, nil +} + +// DeleteBucketInventoryConfiguration deletes an inventory configuration from a bucket. +// This is a MinIO-specific API and is not compatible with AWS S3. +// +// Parameters: +// - ctx: Context for request cancellation and timeout +// - bucket: Name of the bucket +// - id: Unique identifier for the inventory configuration to delete +// +// Returns an error if the operation fails or if the configuration doesn't exist. 
+func (c *Client) DeleteBucketInventoryConfiguration(ctx context.Context, bucket, id string) error { + if err := s3utils.CheckValidBucketName(bucket); err != nil { + return err + } + if id == "" { + return errInvalidArgument("inventory ID cannot be empty") + } + reqMeta := makeInventoryReqMetadata(bucket, "id", id) + resp, err := c.executeMethod(ctx, http.MethodDelete, reqMeta) + defer closeResponse(resp) + if err != nil { + return err + } + if resp.StatusCode != http.StatusOK { + return httpRespToErrorResponse(resp, bucket, "") + } + return nil +} + +// InventoryConfiguration represents the inventory configuration +type InventoryConfiguration struct { + Bucket string `json:"bucket"` + ID string `json:"id"` + User string `json:"user"` + YamlDef string `json:"yamlDef,omitempty"` +} + +// InventoryListResult represents the result of listing inventory +// configurations. +type InventoryListResult struct { + Items []InventoryConfiguration `json:"items"` + NextContinuationToken string `json:"nextContinuationToken,omitempty"` +} + +// ListBucketInventoryConfigurations lists up to 100 inventory configurations for a bucket. +// This is a MinIO-specific API and is not compatible with AWS S3. +// +// Parameters: +// - ctx: Context for request cancellation and timeout +// - bucket: Name of the bucket +// - continuationToken: Token for pagination (empty string for first request) +// +// Returns a list result with configurations and a continuation token for the next page, or an error. 
+func (c *Client) ListBucketInventoryConfigurations(ctx context.Context, bucket, continuationToken string) (lr *InventoryListResult, err error) { + if err := s3utils.CheckValidBucketName(bucket); err != nil { + return nil, err + } + reqMeta := makeInventoryReqMetadata(bucket, "continuation-token", continuationToken) + resp, err := c.executeMethod(ctx, http.MethodGet, reqMeta) + defer closeResponse(resp) + if err != nil { + return nil, err + } + if resp.StatusCode != http.StatusOK { + return nil, httpRespToErrorResponse(resp, bucket, "") + } + + decoder := json.NewDecoder(resp.Body) + err = decoder.Decode(&lr) + if err != nil { + return nil, err + } + return lr, nil +} + +// ListBucketInventoryConfigurationsIterator returns an iterator that lists all inventory configurations +// for a bucket. This is a MinIO-specific API and is not compatible with AWS S3. +// +// Parameters: +// - ctx: Context for request cancellation and timeout +// - bucket: Name of the bucket +// +// Returns an iterator that yields InventoryConfiguration values and errors. The iterator automatically +// handles pagination and fetches all configurations. +func (c *Client) ListBucketInventoryConfigurationsIterator(ctx context.Context, bucket string) iter.Seq2[InventoryConfiguration, error] { + return func(yield func(InventoryConfiguration, error) bool) { + if err := s3utils.CheckValidBucketName(bucket); err != nil { + yield(InventoryConfiguration{}, err) + return + } + var continuationToken string + for { + listResult, err := c.ListBucketInventoryConfigurations(ctx, bucket, continuationToken) + if err != nil { + yield(InventoryConfiguration{}, err) + return + } + + for _, item := range listResult.Items { + if !yield(item, nil) { + return + } + } + + if listResult.NextContinuationToken == "" { + return + } + continuationToken = listResult.NextContinuationToken + } + } +} + +// InventoryJobStatus represents the status of an inventory job. 
+type InventoryJobStatus struct { + Bucket string `json:"bucket"` + ID string `json:"id"` + User string `json:"user"` + AccessKey string `json:"accessKey"` + Schedule string `json:"schedule"` + State string `json:"state"` + NextScheduledTime time.Time `json:"nextScheduledTime,omitempty"` + StartTime time.Time `json:"startTime,omitempty"` + EndTime time.Time `json:"endTime,omitempty"` + LastUpdate time.Time `json:"lastUpdate,omitempty"` + Scanned string `json:"scanned,omitempty"` + Matched string `json:"matched,omitempty"` + ScannedCount uint64 `json:"scannedCount,omitempty"` + MatchedCount uint64 `json:"matchedCount,omitempty"` + RecordsWritten uint64 `json:"recordsWritten,omitempty"` + OutputFilesCount uint64 `json:"outputFilesCount,omitempty"` + ExecutionTime string `json:"executionTime,omitempty"` + NumStarts uint64 `json:"numStarts,omitempty"` + NumErrors uint64 `json:"numErrors,omitempty"` + NumLockLosses uint64 `json:"numLockLosses,omitempty"` + ManifestPath string `json:"manifestPath,omitempty"` + RetryAttempts uint64 `json:"retryAttempts,omitempty"` + LastFailTime time.Time `json:"lastFailTime,omitempty"` + LastFailErrors []string `json:"lastFailErrors,omitempty"` +} + +// GetBucketInventoryJobStatus retrieves the status of an inventory job for a bucket. +// This is a MinIO-specific API and is not compatible with AWS S3. +// +// Parameters: +// - ctx: Context for request cancellation and timeout +// - bucket: Name of the bucket +// - id: Unique identifier for the inventory job +// +// Returns the inventory job status including execution state, progress, and error information, or an error if the operation fails. 
+func (c *Client) GetBucketInventoryJobStatus(ctx context.Context, bucket, id string) (*InventoryJobStatus, error) { + if err := s3utils.CheckValidBucketName(bucket); err != nil { + return nil, err + } + if id == "" { + return nil, errInvalidArgument("inventory ID cannot be empty") + } + reqMeta := makeInventoryReqMetadata(bucket, "id", id, "status", "") + resp, err := c.executeMethod(ctx, http.MethodGet, reqMeta) + defer closeResponse(resp) + if err != nil { + return nil, err + } + if resp.StatusCode != http.StatusOK { + return nil, httpRespToErrorResponse(resp, bucket, "") + } + decoder := json.NewDecoder(resp.Body) + var jStatus InventoryJobStatus + err = decoder.Decode(&jStatus) + if err != nil { + return nil, err + } + return &jStatus, nil +} diff --git a/vendor/github.com/minio/minio-go/v7/api-object-legal-hold.go b/vendor/github.com/minio/minio-go/v7/api-object-legal-hold.go index 0c027d550f7..5ad9a494371 100644 --- a/vendor/github.com/minio/minio-go/v7/api-object-legal-hold.go +++ b/vendor/github.com/minio/minio-go/v7/api-object-legal-hold.go @@ -80,7 +80,16 @@ func newObjectLegalHold(status *LegalHoldStatus) (*objectLegalHold, error) { return legalHold, nil } -// PutObjectLegalHold : sets object legal hold for a given object and versionID. +// PutObjectLegalHold sets the legal hold status for an object and specific version. +// Legal hold prevents an object version from being overwritten or deleted, regardless of retention settings. +// +// Parameters: +// - ctx: Context for request cancellation and timeout +// - bucketName: Name of the bucket +// - objectName: Name of the object +// - opts: Options including Status (LegalHoldEnabled or LegalHoldDisabled) and optional VersionID +// +// Returns an error if the operation fails or if the status is invalid. func (c *Client) PutObjectLegalHold(ctx context.Context, bucketName, objectName string, opts PutObjectLegalHoldOptions) error { // Input validation. 
if err := s3utils.CheckValidBucketName(bucketName); err != nil { @@ -134,7 +143,15 @@ func (c *Client) PutObjectLegalHold(ctx context.Context, bucketName, objectName return nil } -// GetObjectLegalHold gets legal-hold status of given object. +// GetObjectLegalHold retrieves the legal hold status for an object and specific version. +// +// Parameters: +// - ctx: Context for request cancellation and timeout +// - bucketName: Name of the bucket +// - objectName: Name of the object +// - opts: Options including optional VersionID to target a specific version +// +// Returns the legal hold status (LegalHoldEnabled or LegalHoldDisabled) or an error if the operation fails. func (c *Client) GetObjectLegalHold(ctx context.Context, bucketName, objectName string, opts GetObjectLegalHoldOptions) (status *LegalHoldStatus, err error) { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { diff --git a/vendor/github.com/minio/minio-go/v7/api-object-retention.go b/vendor/github.com/minio/minio-go/v7/api-object-retention.go index b29cb1f8dce..2efb5d89a6a 100644 --- a/vendor/github.com/minio/minio-go/v7/api-object-retention.go +++ b/vendor/github.com/minio/minio-go/v7/api-object-retention.go @@ -62,7 +62,16 @@ type PutObjectRetentionOptions struct { VersionID string } -// PutObjectRetention sets object retention for a given object and versionID. +// PutObjectRetention sets the retention configuration for an object and specific version. +// Object retention prevents an object version from being deleted or overwritten for a specified period. +// +// Parameters: +// - ctx: Context for request cancellation and timeout +// - bucketName: Name of the bucket +// - objectName: Name of the object +// - opts: Options including Mode (GOVERNANCE or COMPLIANCE), RetainUntilDate, optional VersionID, and GovernanceBypass +// +// Returns an error if the operation fails or if the retention settings are invalid. 
func (c *Client) PutObjectRetention(ctx context.Context, bucketName, objectName string, opts PutObjectRetentionOptions) error { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { @@ -125,7 +134,15 @@ func (c *Client) PutObjectRetention(ctx context.Context, bucketName, objectName return nil } -// GetObjectRetention gets retention of given object. +// GetObjectRetention retrieves the retention configuration for an object and specific version. +// +// Parameters: +// - ctx: Context for request cancellation and timeout +// - bucketName: Name of the bucket +// - objectName: Name of the object +// - versionID: Optional version ID to target a specific version (empty string for current version) +// +// Returns the retention mode (GOVERNANCE or COMPLIANCE), retain-until date, and any error. func (c *Client) GetObjectRetention(ctx context.Context, bucketName, objectName, versionID string) (mode *RetentionMode, retainUntilDate *time.Time, err error) { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { diff --git a/vendor/github.com/minio/minio-go/v7/api-object-tagging.go b/vendor/github.com/minio/minio-go/v7/api-object-tagging.go index 6623e262a8b..66d13110678 100644 --- a/vendor/github.com/minio/minio-go/v7/api-object-tagging.go +++ b/vendor/github.com/minio/minio-go/v7/api-object-tagging.go @@ -40,8 +40,17 @@ type AdvancedObjectTaggingOptions struct { ReplicationProxyRequest string } -// PutObjectTagging replaces or creates object tag(s) and can target -// a specific object version in a versioned bucket. +// PutObjectTagging replaces or creates object tag(s) and can target a specific object version +// in a versioned bucket. 
+// +// Parameters: +// - ctx: Context for request cancellation and timeout +// - bucketName: Name of the bucket +// - objectName: Name of the object +// - otags: Tags to apply to the object +// - opts: Options including VersionID to target a specific version +// +// Returns an error if the operation fails. func (c *Client) PutObjectTagging(ctx context.Context, bucketName, objectName string, otags *tags.Tags, opts PutObjectTaggingOptions) error { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { @@ -96,8 +105,16 @@ type GetObjectTaggingOptions struct { Internal AdvancedObjectTaggingOptions } -// GetObjectTagging fetches object tag(s) with options to target -// a specific object version in a versioned bucket. +// GetObjectTagging retrieves object tag(s) with options to target a specific object version +// in a versioned bucket. +// +// Parameters: +// - ctx: Context for request cancellation and timeout +// - bucketName: Name of the bucket +// - objectName: Name of the object +// - opts: Options including VersionID to target a specific version +// +// Returns the object's tags or an error if the operation fails. func (c *Client) GetObjectTagging(ctx context.Context, bucketName, objectName string, opts GetObjectTaggingOptions) (*tags.Tags, error) { // Get resources properly escaped and lined up before // using them in http request. @@ -139,8 +156,16 @@ type RemoveObjectTaggingOptions struct { Internal AdvancedObjectTaggingOptions } -// RemoveObjectTagging removes object tag(s) with options to control a specific object -// version in a versioned bucket +// RemoveObjectTagging removes object tag(s) with options to target a specific object version +// in a versioned bucket. 
+// +// Parameters: +// - ctx: Context for request cancellation and timeout +// - bucketName: Name of the bucket +// - objectName: Name of the object +// - opts: Options including VersionID to target a specific version +// +// Returns an error if the operation fails. func (c *Client) RemoveObjectTagging(ctx context.Context, bucketName, objectName string, opts RemoveObjectTaggingOptions) error { // Get resources properly escaped and lined up before // using them in http request. diff --git a/vendor/github.com/minio/minio-go/v7/api-prompt-object.go b/vendor/github.com/minio/minio-go/v7/api-prompt-object.go index 26c41d34aa7..55c038ae028 100644 --- a/vendor/github.com/minio/minio-go/v7/api-prompt-object.go +++ b/vendor/github.com/minio/minio-go/v7/api-prompt-object.go @@ -20,10 +20,10 @@ package minio import ( "bytes" "context" + "encoding/json" "io" "net/http" - "github.com/minio/minio-go/v7/internal/json" "github.com/minio/minio-go/v7/pkg/s3utils" ) diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-common.go b/vendor/github.com/minio/minio-go/v7/api-put-object-common.go index 9ccb97cbb97..52f69563ca4 100644 --- a/vendor/github.com/minio/minio-go/v7/api-put-object-common.go +++ b/vendor/github.com/minio/minio-go/v7/api-put-object-common.go @@ -31,7 +31,7 @@ const nullVersionID = "null" // Verify if reader is *minio.Object func isObject(reader io.Reader) (ok bool) { _, ok = reader.(*Object) - return + return ok } // Verify if reader is a generic ReaderAt @@ -56,7 +56,7 @@ func isReadAt(reader io.Reader) (ok bool) { } else { _, ok = reader.(io.ReaderAt) } - return + return ok } // OptimalPartInfo - calculate the optimal part info for a given @@ -79,31 +79,31 @@ func OptimalPartInfo(objectSize int64, configuredPartSize uint64) (totalPartsCou // object size is larger than supported maximum. 
if objectSize > maxMultipartPutObjectSize { err = errEntityTooLarge(objectSize, maxMultipartPutObjectSize, "", "") - return + return totalPartsCount, partSize, lastPartSize, err } var partSizeFlt float64 if configuredPartSize > 0 { if int64(configuredPartSize) > objectSize { err = errEntityTooLarge(int64(configuredPartSize), objectSize, "", "") - return + return totalPartsCount, partSize, lastPartSize, err } if !unknownSize { if objectSize > (int64(configuredPartSize) * maxPartsCount) { err = errInvalidArgument("Part size * max_parts(10000) is lesser than input objectSize.") - return + return totalPartsCount, partSize, lastPartSize, err } } if configuredPartSize < absMinPartSize { err = errInvalidArgument("Input part size is smaller than allowed minimum of 5MiB.") - return + return totalPartsCount, partSize, lastPartSize, err } if configuredPartSize > maxPartSize { err = errInvalidArgument("Input part size is bigger than allowed maximum of 5GiB.") - return + return totalPartsCount, partSize, lastPartSize, err } partSizeFlt = float64(configuredPartSize) diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-fan-out.go b/vendor/github.com/minio/minio-go/v7/api-put-object-fan-out.go index a6b5149f05d..3023b949cd4 100644 --- a/vendor/github.com/minio/minio-go/v7/api-put-object-fan-out.go +++ b/vendor/github.com/minio/minio-go/v7/api-put-object-fan-out.go @@ -19,6 +19,7 @@ package minio import ( "context" + "encoding/json" "errors" "io" "mime/multipart" @@ -27,7 +28,6 @@ import ( "strings" "time" - "github.com/minio/minio-go/v7/internal/json" "github.com/minio/minio-go/v7/pkg/encrypt" ) diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go b/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go index db5314d5f7a..79d0c1dc1ba 100644 --- a/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go +++ b/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go @@ -210,9 +210,13 @@ func (c *Client) 
putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN } objPart, err := c.uploadPart(ctx, p) if err != nil { - uploadedPartsCh <- uploadedPartRes{ + select { + case <-ctx.Done(): + case uploadedPartsCh <- uploadedPartRes{ Error: err, + }: } + // Exit the goroutine. return } @@ -221,10 +225,13 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN uploadReq.Part = objPart // Send successful part info through the channel. - uploadedPartsCh <- uploadedPartRes{ + select { + case <-ctx.Done(): + case uploadedPartsCh <- uploadedPartRes{ Size: objPart.Size, PartNum: uploadReq.PartNum, Part: uploadReq.Part, + }: } } }(partSize) diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object.go b/vendor/github.com/minio/minio-go/v7/api-put-object.go index 877cecb84c5..80f3d61f34a 100644 --- a/vendor/github.com/minio/minio-go/v7/api-put-object.go +++ b/vendor/github.com/minio/minio-go/v7/api-put-object.go @@ -150,7 +150,7 @@ func (opts PutObjectOptions) getNumThreads() (numThreads int) { } else { numThreads = totalWorkers } - return + return numThreads } // Header - constructs the headers from metadata entered by user in @@ -249,7 +249,7 @@ func (opts PutObjectOptions) Header() (header http.Header) { header[k] = v } - return + return header } // validate() checks if the UserMetadata map has standard headers or and raises an error if so. 
diff --git a/vendor/github.com/minio/minio-go/v7/api-remove.go b/vendor/github.com/minio/minio-go/v7/api-remove.go index 2a38e014a23..9794ffb2bde 100644 --- a/vendor/github.com/minio/minio-go/v7/api-remove.go +++ b/vendor/github.com/minio/minio-go/v7/api-remove.go @@ -617,12 +617,11 @@ func (c *Client) removeObjects(ctx context.Context, bucketName string, objectsCh customHeader: headers, expect200OKWithError: true, }) - if resp != nil { - if resp.StatusCode != http.StatusOK { - e := httpRespToErrorResponse(resp, bucketName, "") - resultCh <- RemoveObjectResult{ObjectName: "", Err: e} - } + + if resp != nil && resp.StatusCode != http.StatusOK { + err = httpRespToErrorResponse(resp, bucketName, "") } + if err != nil { for _, b := range batch { resultCh <- RemoveObjectResult{ diff --git a/vendor/github.com/minio/minio-go/v7/api.go b/vendor/github.com/minio/minio-go/v7/api.go index 53ef6b85a8e..5352d793b87 100644 --- a/vendor/github.com/minio/minio-go/v7/api.go +++ b/vendor/github.com/minio/minio-go/v7/api.go @@ -21,7 +21,6 @@ import ( "bytes" "context" "encoding/base64" - "encoding/xml" "errors" "fmt" "io" @@ -43,7 +42,6 @@ import ( md5simd "github.com/minio/md5-simd" "github.com/minio/minio-go/v7/pkg/credentials" "github.com/minio/minio-go/v7/pkg/kvcache" - "github.com/minio/minio-go/v7/pkg/peeker" "github.com/minio/minio-go/v7/pkg/s3utils" "github.com/minio/minio-go/v7/pkg/signer" "github.com/minio/minio-go/v7/pkg/singleflight" @@ -162,7 +160,7 @@ type Options struct { // Global constants. const ( libraryName = "minio-go" - libraryVersion = "v7.0.95" + libraryVersion = "v7.0.96" ) // User Agent should always following the below style. @@ -204,7 +202,9 @@ func New(endpoint string, opts *Options) (*Client, error) { return clnt, nil } -// EndpointURL returns the URL of the S3 endpoint. +// EndpointURL returns the URL of the S3-compatible endpoint that this client connects to. +// +// Returns a copy of the endpoint URL to prevent modification of internal state. 
func (c *Client) EndpointURL() *url.URL { endpoint := *c.endpointURL // copy to prevent callers from modifying internal state return &endpoint @@ -221,7 +221,7 @@ func (r *lockedRandSource) Int63() (n int64) { r.lk.Lock() n = r.src.Int63() r.lk.Unlock() - return + return n } // Seed uses the provided seed value to initialize the generator to a @@ -325,7 +325,14 @@ func privateNew(endpoint string, opts *Options) (*Client, error) { return clnt, nil } -// SetAppInfo - add application details to user agent. +// SetAppInfo adds custom application name and version to the User-Agent header for all requests. +// This helps identify your application in server logs and metrics. +// +// Parameters: +// - appName: Name of the application +// - appVersion: Version of the application +// +// Both parameters must be non-empty for the custom User-Agent to be set. func (c *Client) SetAppInfo(appName, appVersion string) { // if app name and version not set, we do not set a new user agent. if appName != "" && appVersion != "" { @@ -334,7 +341,11 @@ func (c *Client) SetAppInfo(appName, appVersion string) { } } -// TraceOn - enable HTTP tracing. +// TraceOn enables HTTP request and response tracing for debugging purposes. +// All HTTP traffic will be written to the provided output stream. +// +// Parameters: +// - outputStream: Writer where trace output will be written (defaults to os.Stdout if nil) func (c *Client) TraceOn(outputStream io.Writer) { // if outputStream is nil then default to os.Stdout. if outputStream == nil { @@ -347,19 +358,23 @@ func (c *Client) TraceOn(outputStream io.Writer) { c.isTraceEnabled = true } -// TraceErrorsOnlyOn - same as TraceOn, but only errors will be traced. +// TraceErrorsOnlyOn enables HTTP tracing but only for requests that result in errors. +// This is useful for debugging without the overhead of tracing all requests. 
+// +// Parameters: +// - outputStream: Writer where trace output will be written (defaults to os.Stdout if nil) func (c *Client) TraceErrorsOnlyOn(outputStream io.Writer) { c.TraceOn(outputStream) c.traceErrorsOnly = true } -// TraceErrorsOnlyOff - Turns off the errors only tracing and everything will be traced after this call. -// If all tracing needs to be turned off, call TraceOff(). +// TraceErrorsOnlyOff disables errors-only mode and traces all requests. +// To disable all tracing, call TraceOff() instead. func (c *Client) TraceErrorsOnlyOff() { c.traceErrorsOnly = false } -// TraceOff - disable HTTP tracing. +// TraceOff disables all HTTP tracing (both normal and errors-only modes). func (c *Client) TraceOff() { // Disable tracing. c.isTraceEnabled = false @@ -620,33 +635,11 @@ func (c *Client) do(req *http.Request) (resp *http.Response, err error) { return resp, nil } -// Peek resp.Body looking for S3 XMl error response: -// - Return the error XML bytes if an error is found -// - Make sure to always restablish the whole http response stream before returning -func tryParseErrRespFromBody(resp *http.Response) ([]byte, error) { - peeker := peeker.NewPeekReadCloser(resp.Body, 5*humanize.MiByte) - defer func() { - peeker.ReplayFromStart() - resp.Body = peeker - }() - - errResp := ErrorResponse{} - errBytes, err := xmlDecodeAndBody(peeker, &errResp) - if err != nil { - var unmarshalErr xml.UnmarshalError - if errors.As(err, &unmarshalErr) { - return nil, nil - } - return nil, err - } - return errBytes, nil -} - // List of success status. 
-var successStatus = []int{ - http.StatusOK, - http.StatusNoContent, - http.StatusPartialContent, +var successStatus = map[int]struct{}{ + http.StatusOK: {}, + http.StatusNoContent: {}, + http.StatusPartialContent: {}, } // executeMethod - instantiates a given method, and retries the @@ -729,29 +722,15 @@ func (c *Client) executeMethod(ctx context.Context, method string, metadata requ return nil, err } - var success bool - var errBodyBytes []byte - - for _, httpStatus := range successStatus { - if httpStatus == res.StatusCode { - success = true - break - } - } - - if success { - if !metadata.expect200OKWithError { - return res, nil - } - errBodyBytes, err = tryParseErrRespFromBody(res) - if err == nil && len(errBodyBytes) == 0 { - // No S3 XML error is found - return res, nil - } - } else { - errBodyBytes, err = io.ReadAll(res.Body) - } + _, success := successStatus[res.StatusCode] + if success && !metadata.expect200OKWithError { + // We do not expect 2xx to return an error return. + return res, nil + } // in all other situations we must first parse the body as ErrorResponse + // 5MiB is sufficiently large enough to hold any error or regular XML response. + var bodyBytes []byte + bodyBytes, err = io.ReadAll(io.LimitReader(res.Body, 5*humanize.MiByte)) // By now, res.Body should be closed closeResponse(res) if err != nil { @@ -759,16 +738,22 @@ func (c *Client) executeMethod(ctx context.Context, method string, metadata requ } // Save the body. - errBodySeeker := bytes.NewReader(errBodyBytes) - res.Body = io.NopCloser(errBodySeeker) + bodySeeker := bytes.NewReader(bodyBytes) + res.Body = io.NopCloser(bodySeeker) - // For errors verify if its retryable otherwise fail quickly. - errResponse := ToErrorResponse(httpRespToErrorResponse(res, metadata.bucketName, metadata.objectName)) - err = errResponse + apiErr := httpRespToErrorResponse(res, metadata.bucketName, metadata.objectName) // Save the body back again. - errBodySeeker.Seek(0, 0) // Seek back to starting point. 
- res.Body = io.NopCloser(errBodySeeker) + bodySeeker.Seek(0, 0) // Seek back to starting point. + res.Body = io.NopCloser(bodySeeker) + + if apiErr == nil { + return res, nil + } + + // For errors verify if its retryable otherwise fail quickly. + errResponse := ToErrorResponse(apiErr) + err = errResponse // Bucket region if set in error response and the error // code dictates invalid region, we can retry the request diff --git a/vendor/github.com/minio/minio-go/v7/bucket-cache.go b/vendor/github.com/minio/minio-go/v7/bucket-cache.go index b41902f6523..a37a72ae8ec 100644 --- a/vendor/github.com/minio/minio-go/v7/bucket-cache.go +++ b/vendor/github.com/minio/minio-go/v7/bucket-cache.go @@ -142,7 +142,7 @@ func (c *Client) getBucketLocationRequest(ctx context.Context, bucketName string if h, p, err := net.SplitHostPort(targetURL.Host); err == nil { if targetURL.Scheme == "http" && p == "80" || targetURL.Scheme == "https" && p == "443" { targetURL.Host = h - if ip := net.ParseIP(h); ip != nil && ip.To16() != nil { + if ip := net.ParseIP(h); ip != nil && ip.To4() == nil { targetURL.Host = "[" + h + "]" } } diff --git a/vendor/github.com/minio/minio-go/v7/checksum.go b/vendor/github.com/minio/minio-go/v7/checksum.go index 0691c1fbb95..0feb89bf82e 100644 --- a/vendor/github.com/minio/minio-go/v7/checksum.go +++ b/vendor/github.com/minio/minio-go/v7/checksum.go @@ -24,13 +24,13 @@ import ( "encoding/binary" "errors" "hash" - "hash/crc32" "io" "math/bits" "net/http" "sort" "strings" + "github.com/klauspost/crc32" "github.com/minio/crc64nvme" ) diff --git a/vendor/github.com/minio/minio-go/v7/code_of_conduct.md b/vendor/github.com/minio/minio-go/v7/code_of_conduct.md index cb232c3c69c..7dcdbfc3e0e 100644 --- a/vendor/github.com/minio/minio-go/v7/code_of_conduct.md +++ b/vendor/github.com/minio/minio-go/v7/code_of_conduct.md @@ -1,80 +1,52 @@ -# Contributor Covenant Code of Conduct +Contributor Covenant Code of Conduct +==================================== -## Our Pledge 
+Our Pledge +---------- -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to making participation in our project and -our community a harassment-free experience for everyone, regardless of age, body -size, disability, ethnicity, gender identity and expression, level of experience, -nationality, personal appearance, race, religion, or sexual identity and -orientation. +In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. -## Our Standards +Our Standards +------------- -Examples of behavior that contributes to creating a positive environment -include: +Examples of behavior that contributes to creating a positive environment include: -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members +- Using welcoming and inclusive language +- Being respectful of differing viewpoints and experiences +- Gracefully accepting constructive criticism +- Focusing on what is best for the community +- Showing empathy towards other community members Examples of unacceptable behavior by participants include: -* The use of sexualized language or imagery and unwelcome sexual attention or - advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic - address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional 
setting +- The use of sexualized language or imagery and unwelcome sexual attention or advances +- Trolling, insulting/derogatory comments, and personal or political attacks +- Public or private harassment +- Publishing others' private information, such as a physical or electronic address, without explicit permission +- Other conduct which could reasonably be considered inappropriate in a professional setting -## Our Responsibilities +Our Responsibilities +-------------------- -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior, in compliance with the -licensing terms applying to the Project developments. +Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior, in compliance with the licensing terms applying to the Project developments. -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. However, these actions shall respect the -licensing terms of the Project Developments that will always supersede such -Code of Conduct. +Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. 
However, these actions shall respect the licensing terms of the Project Developments that will always supersede such Code of Conduct. -## Scope +Scope +----- -This Code of Conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. Examples of -representing a project or community include using an official project e-mail -address, posting via an official social media account, or acting as an appointed -representative at an online or offline event. Representation of a project may be -further defined and clarified by project maintainers. +This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. -## Enforcement +Enforcement +----------- -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at dev@min.io. The project team -will review and investigate all complaints, and will respond in a way that it deems -appropriate to the circumstances. The project team is obligated to maintain -confidentiality with regard to the reporter of an incident. -Further details of specific enforcement policies may be posted separately. +Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at dev@min.io. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. 
-Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. +Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. -## Attribution +Attribution +----------- -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at [http://contributor-covenant.org/version/1/4][version] +This Code of Conduct is adapted from the [Contributor Covenant](http://contributor-covenant.org), version 1.4, available at [http://contributor-covenant.org/version/1/4](http://contributor-covenant.org/version/1/4/) -This version includes a clarification to ensure that the code of conduct is in -compliance with the free software licensing terms of the project. - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ +This version includes a clarification to ensure that the code of conduct is in compliance with the free software licensing terms of the project. 
diff --git a/vendor/github.com/minio/minio-go/v7/create-session.go b/vendor/github.com/minio/minio-go/v7/create-session.go index 676ad21d135..47c286564e7 100644 --- a/vendor/github.com/minio/minio-go/v7/create-session.go +++ b/vendor/github.com/minio/minio-go/v7/create-session.go @@ -114,7 +114,7 @@ func (c *Client) createSessionRequest(ctx context.Context, bucketName string, se if h, p, err := net.SplitHostPort(host); err == nil { if targetURL.Scheme == "http" && p == "80" || targetURL.Scheme == "https" && p == "443" { host = h - if ip := net.ParseIP(h); ip != nil && ip.To16() != nil { + if ip := net.ParseIP(h); ip != nil && ip.To4() == nil { host = "[" + h + "]" } } diff --git a/vendor/github.com/minio/minio-go/v7/endpoints.go b/vendor/github.com/minio/minio-go/v7/endpoints.go index 00f95d1b52d..34b340b3912 100644 --- a/vendor/github.com/minio/minio-go/v7/endpoints.go +++ b/vendor/github.com/minio/minio-go/v7/endpoints.go @@ -240,6 +240,10 @@ var awsS3EndpointMap = map[string]awsS3Endpoint{ "s3.mx-central-1.amazonaws.com", "s3.dualstack.mx-central-1.amazonaws.com", }, + "ap-east-2": { + "s3.ap-east-2.amazonaws.com", + "s3.dualstack.ap-east-2.amazonaws.com", + }, } // getS3ExpressEndpoint get Amazon S3 Express endpoing based on the region diff --git a/vendor/github.com/minio/minio-go/v7/functional_tests.go b/vendor/github.com/minio/minio-go/v7/functional_tests.go index 3ade9a6aff1..4f8f9dd8cc7 100644 --- a/vendor/github.com/minio/minio-go/v7/functional_tests.go +++ b/vendor/github.com/minio/minio-go/v7/functional_tests.go @@ -88,7 +88,7 @@ func createHTTPTransport() (transport *http.Transport) { transport.TLSClientConfig.InsecureSkipVerify = true } - return + return transport } var readFull = func(r io.Reader, buf []byte) (n int, err error) { @@ -123,7 +123,7 @@ var readFull = func(r io.Reader, buf []byte) (n int, err error) { } else if n > 0 && err == io.EOF { err = io.ErrUnexpectedEOF } - return + return n, err } func baseLogger(testName, function string, args 
map[string]interface{}, startTime time.Time) *slog.Logger { @@ -282,7 +282,7 @@ var mintDataDir = os.Getenv("MINT_DATA_DIR") func getMintDataDirFilePath(filename string) (fp string) { if mintDataDir == "" { - return + return fp } return filepath.Join(mintDataDir, filename) } @@ -2075,6 +2075,9 @@ func testPutObjectWithChecksums() { cmpChecksum(resp.ChecksumCRC32, meta["x-amz-checksum-crc32"]) cmpChecksum(resp.ChecksumCRC32C, meta["x-amz-checksum-crc32c"]) cmpChecksum(resp.ChecksumCRC64NVME, meta["x-amz-checksum-crc64nvme"]) + if resp.ChecksumMode != minio.ChecksumFullObjectMode.String() { + logError(testName, function, args, startTime, "", "Checksum mode is not full object", fmt.Errorf("got %s, want %s", resp.ChecksumMode, minio.ChecksumFullObjectMode.String())) + } // Read the data back gopts := minio.GetObjectOptions{Checksum: true} @@ -2095,6 +2098,9 @@ func testPutObjectWithChecksums() { cmpChecksum(st.ChecksumCRC32, meta["x-amz-checksum-crc32"]) cmpChecksum(st.ChecksumCRC32C, meta["x-amz-checksum-crc32c"]) cmpChecksum(st.ChecksumCRC64NVME, meta["x-amz-checksum-crc64nvme"]) + if st.ChecksumMode != minio.ChecksumFullObjectMode.String() { + logError(testName, function, args, startTime, "", "Checksum mode is not full object", fmt.Errorf("got %s, want %s", st.ChecksumMode, minio.ChecksumFullObjectMode.String())) + } if st.Size != int64(bufSize) { logError(testName, function, args, startTime, "", "Number of bytes returned by PutObject does not match GetObject, expected "+string(bufSize)+" got "+string(st.Size), err) @@ -2435,7 +2441,10 @@ func testPutMultipartObjectWithChecksums() { reader.Close() h := test.cs.Hasher() h.Reset() - want := hashMultiPart(b, partSize, test.cs) + // wantChksm might be the full object checksum or the multipart checksum, depending on the test.cs type. + wantChksm := hashMultiPart(b, partSize, test.cs) + // wantFullObjectChksm is always the full object checksum that is returned after CopyObject. 
+ wantFullObjectChksm := hashMultiPart(b, len(b), test.cs) rd := bytes.NewReader(b) cs := test.cs @@ -2456,15 +2465,15 @@ func testPutMultipartObjectWithChecksums() { switch test.cs.Base() { case minio.ChecksumCRC32C: - cmpChecksum(resp.ChecksumCRC32C, want) + cmpChecksum(resp.ChecksumCRC32C, wantChksm) case minio.ChecksumCRC32: - cmpChecksum(resp.ChecksumCRC32, want) + cmpChecksum(resp.ChecksumCRC32, wantChksm) case minio.ChecksumSHA1: - cmpChecksum(resp.ChecksumSHA1, want) + cmpChecksum(resp.ChecksumSHA1, wantChksm) case minio.ChecksumSHA256: - cmpChecksum(resp.ChecksumSHA256, want) + cmpChecksum(resp.ChecksumSHA256, wantChksm) case minio.ChecksumCRC64NVME: - cmpChecksum(resp.ChecksumCRC64NVME, want) + cmpChecksum(resp.ChecksumCRC64NVME, wantChksm) } args["section"] = "HeadObject" @@ -2475,15 +2484,51 @@ func testPutMultipartObjectWithChecksums() { } switch test.cs.Base() { case minio.ChecksumCRC32C: - cmpChecksum(st.ChecksumCRC32C, want) + cmpChecksum(st.ChecksumCRC32C, wantChksm) case minio.ChecksumCRC32: - cmpChecksum(st.ChecksumCRC32, want) + cmpChecksum(st.ChecksumCRC32, wantChksm) case minio.ChecksumSHA1: - cmpChecksum(st.ChecksumSHA1, want) + cmpChecksum(st.ChecksumSHA1, wantChksm) case minio.ChecksumSHA256: - cmpChecksum(st.ChecksumSHA256, want) + cmpChecksum(st.ChecksumSHA256, wantChksm) case minio.ChecksumCRC64NVME: - cmpChecksum(st.ChecksumCRC64NVME, want) + cmpChecksum(st.ChecksumCRC64NVME, wantChksm) + } + + // Use the CopyObject API to make a copy, in the case it was a composite checksum, + // it will change because the copy is no longer a multipart object. S3 returns the checksum + // of the full object when HeadObject is called on the copy. 
+ args["section"] = "CopyObject" + objectCopyName := objectName + "-copy" + _, err = c.CopyObject(context.Background(), minio.CopyDestOptions{ + Bucket: bucketName, + Object: objectCopyName, + }, minio.CopySrcOptions{ + Bucket: bucketName, + Object: objectName, + }) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObject failed", err) + return + } + + args["section"] = "HeadObject-Copy" + st, err = c.StatObject(context.Background(), bucketName, objectCopyName, minio.StatObjectOptions{Checksum: true}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject failed", err) + return + } + switch test.cs.Base() { + case minio.ChecksumCRC32C: + cmpChecksum(st.ChecksumCRC32C, wantFullObjectChksm) + case minio.ChecksumCRC32: + cmpChecksum(st.ChecksumCRC32, wantFullObjectChksm) + case minio.ChecksumSHA1: + cmpChecksum(st.ChecksumSHA1, wantFullObjectChksm) + case minio.ChecksumSHA256: + cmpChecksum(st.ChecksumSHA256, wantFullObjectChksm) + case minio.ChecksumCRC64NVME: + cmpChecksum(st.ChecksumCRC64NVME, wantFullObjectChksm) } args["section"] = "GetObjectAttributes" @@ -2493,19 +2538,19 @@ func testPutMultipartObjectWithChecksums() { return } - if strings.ContainsRune(want, '-') { - want = want[:strings.IndexByte(want, '-')] + if strings.ContainsRune(wantChksm, '-') { + wantChksm = wantChksm[:strings.IndexByte(wantChksm, '-')] } switch test.cs { // Full Object CRC does not return anything with GetObjectAttributes case minio.ChecksumCRC32C: - cmpChecksum(s.Checksum.ChecksumCRC32C, want) + cmpChecksum(s.Checksum.ChecksumCRC32C, wantChksm) case minio.ChecksumCRC32: - cmpChecksum(s.Checksum.ChecksumCRC32, want) + cmpChecksum(s.Checksum.ChecksumCRC32, wantChksm) case minio.ChecksumSHA1: - cmpChecksum(s.Checksum.ChecksumSHA1, want) + cmpChecksum(s.Checksum.ChecksumSHA1, wantChksm) case minio.ChecksumSHA256: - cmpChecksum(s.Checksum.ChecksumSHA256, want) + cmpChecksum(s.Checksum.ChecksumSHA256, wantChksm) } // Read the data back @@ 
-2529,22 +2574,22 @@ func testPutMultipartObjectWithChecksums() { // Test part 2 checksum... h.Reset() h.Write(b[partSize : 2*partSize]) - want = base64.StdEncoding.EncodeToString(h.Sum(nil)) + wantChksm = base64.StdEncoding.EncodeToString(h.Sum(nil)) switch test.cs { // Full Object CRC does not return any part CRC for whatever reason. case minio.ChecksumCRC32C: - cmpChecksum(st.ChecksumCRC32C, want) + cmpChecksum(st.ChecksumCRC32C, wantChksm) case minio.ChecksumCRC32: - cmpChecksum(st.ChecksumCRC32, want) + cmpChecksum(st.ChecksumCRC32, wantChksm) case minio.ChecksumSHA1: - cmpChecksum(st.ChecksumSHA1, want) + cmpChecksum(st.ChecksumSHA1, wantChksm) case minio.ChecksumSHA256: - cmpChecksum(st.ChecksumSHA256, want) + cmpChecksum(st.ChecksumSHA256, wantChksm) case minio.ChecksumCRC64NVME: // AWS doesn't return part checksum, but may in the future. if st.ChecksumCRC64NVME != "" { - cmpChecksum(st.ChecksumCRC64NVME, want) + cmpChecksum(st.ChecksumCRC64NVME, wantChksm) } } @@ -3321,7 +3366,7 @@ func validateObjectAttributeRequest(OA *minio.ObjectAttributes, opts *minio.Obje if opts.VersionID != "" { if OA.VersionID != opts.VersionID { err = fmt.Errorf("Expected versionId %s but got versionId %s", opts.VersionID, OA.VersionID) - return + return err } } @@ -3348,12 +3393,12 @@ func validateObjectAttributeRequest(OA *minio.ObjectAttributes, opts *minio.Obje if test.HasPartChecksums { if partsMissingChecksum { err = fmt.Errorf("One or all parts were missing a checksum") - return + return err } } else { if foundPartChecksum { err = fmt.Errorf("Did not expect ObjectParts to have checksums but found one") - return + return err } } @@ -3365,52 +3410,52 @@ func validateObjectAttributeRequest(OA *minio.ObjectAttributes, opts *minio.Obje if test.HasFullChecksum { if !hasFullObjectChecksum { err = fmt.Errorf("Full object checksum not found") - return + return err } } else { if hasFullObjectChecksum { err = fmt.Errorf("Did not expect a full object checksum but we got one") - return 
+ return err } } if OA.ETag != test.ETag { err = fmt.Errorf("Etags do not match, got %s but expected %s", OA.ETag, test.ETag) - return + return err } if test.HasParts { if len(OA.ObjectParts.Parts) < 1 { err = fmt.Errorf("Was expecting ObjectParts but none were present") - return + return err } } if OA.StorageClass == "" { err = fmt.Errorf("Was expecting a StorageClass but got none") - return + return err } if OA.ObjectSize != test.ObjectSize { err = fmt.Errorf("Was expecting a ObjectSize but got none") - return + return err } if test.HasParts { if opts.MaxParts == 0 { if len(OA.ObjectParts.Parts) != OA.ObjectParts.PartsCount { err = fmt.Errorf("expected %s parts but got %d", OA.ObjectParts.PartsCount, len(OA.ObjectParts.Parts)) - return + return err } } else if (opts.MaxParts + opts.PartNumberMarker) > OA.ObjectParts.PartsCount { if len(OA.ObjectParts.Parts) != (OA.ObjectParts.PartsCount - opts.PartNumberMarker) { err = fmt.Errorf("expected %d parts but got %d", (OA.ObjectParts.PartsCount - opts.PartNumberMarker), len(OA.ObjectParts.Parts)) - return + return err } } else if opts.MaxParts != 0 { if opts.MaxParts != len(OA.ObjectParts.Parts) { err = fmt.Errorf("expected %d parts but got %d", opts.MaxParts, len(OA.ObjectParts.Parts)) - return + return err } } } @@ -3418,18 +3463,18 @@ func validateObjectAttributeRequest(OA *minio.ObjectAttributes, opts *minio.Obje if OA.ObjectParts.NextPartNumberMarker == OA.ObjectParts.PartsCount { if OA.ObjectParts.IsTruncated { err = fmt.Errorf("Expected ObjectParts to NOT be truncated, but it was") - return + return err } } if OA.ObjectParts.NextPartNumberMarker != OA.ObjectParts.PartsCount { if !OA.ObjectParts.IsTruncated { err = fmt.Errorf("Expected ObjectParts to be truncated, but it was NOT") - return + return err } } - return + return err } // Test PutObject using a large data to trigger multipart readat @@ -3645,6 +3690,93 @@ func testPutObjectStreaming() { logSuccess(testName, function, args, startTime) } +// Test 
PutObject with preconditions on non-existent objects +func testPutObjectPreconditionOnNonExistent() { + startTime := time.Now() + testName := getFuncName() + function := "PutObject(bucketName, objectName, reader, size, opts) with preconditions" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "opts": "minio.PutObjectOptions{SetMatchETag/SetMatchETagExcept}", + } + + c, err := NewClient(ClientConfig{}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Test 1: PutObject with SetMatchETag on non-existent object should fail + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "test-object-") + args["objectName"] = objectName + + data := bytes.NewReader([]byte("test data")) + + opts := minio.PutObjectOptions{} + opts.SetMatchETag("some-etag") + + _, err = c.PutObject(context.Background(), bucketName, objectName, data, int64(data.Len()), opts) + if err == nil { + logError(testName, function, args, startTime, "", "PutObject with SetMatchETag on non-existent object should have failed", nil) + return + } + + errResp := minio.ToErrorResponse(err) + if errResp.Code != "NoSuchKey" { + logError(testName, function, args, startTime, "", fmt.Sprintf("Expected NoSuchKey error (AWS standard for non-existent objects), got %s", errResp.Code), err) + return + } + + // Test 2: PutObject with SetMatchETagExcept (If-None-Match) on non-existent object should succeed + objectName2 := randString(60, rand.NewSource(time.Now().UnixNano()), "test-object2-") + args["objectName"] = objectName2 + + data2 
:= bytes.NewReader([]byte("test data 2")) + opts2 := minio.PutObjectOptions{} + opts2.SetMatchETagExcept("some-etag") + + _, err = c.PutObject(context.Background(), bucketName, objectName2, data2, int64(data2.Len()), opts2) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject with SetMatchETagExcept (If-None-Match) on non-existent object should have succeeded", err) + return + } + // Test 3: CompleteMultipartUpload with preconditions on non-existent object should fail + objectName3 := randString(60, rand.NewSource(time.Now().UnixNano()), "test-multipart-") + args["objectName"] = objectName3 + + data3 := bytes.Repeat([]byte("a"), 5*1024*1024+1) + reader3 := bytes.NewReader(data3) + + opts3 := minio.PutObjectOptions{} + opts3.SetMatchETag("non-existent-etag") + + _, err = c.PutObject(context.Background(), bucketName, objectName3, reader3, int64(len(data3)), opts3) + if err == nil { + logError(testName, function, args, startTime, "", "CompleteMultipartUpload with SetMatchETag on non-existent object should have failed", nil) + return + } + + errResp = minio.ToErrorResponse(err) + if errResp.Code != "NoSuchKey" { + logError(testName, function, args, startTime, "", fmt.Sprintf("Expected NoSuchKey error (AWS standard for non-existent objects) for multipart, got %s", errResp.Code), err) + return + } + + logSuccess(testName, function, args, startTime) +} + // Test get object seeker from the end, using whence set to '2'. func testGetObjectSeekEnd() { // initialize logging params @@ -8653,6 +8785,7 @@ func testCopyObjectV2() { logError(testName, function, args, startTime, "", "GetObject failed", err) return } + // Check the various fields of source object against destination object. 
objInfo, err = r.Stat() if err != nil { @@ -8691,6 +8824,260 @@ func testCopyObjectV2() { logSuccess(testName, function, args, startTime) } +// Tests copy object with various checksum scenarios, tries to not repeat CopyObjectV2 test and +// instead just focus on Checksum. +func testCopyObjectWithChecksums() { + startTime := time.Now() + testName := getFuncName() + function := "CopyObjectWithChecksums(destination, source)" + args := map[string]interface{}{} + + c, err := NewClient(ClientConfig{CredsV2: true}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + + // Make a new bucket in 'us-east-1' (source bucket). + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + defer cleanupBucket(bucketName, c) + + // Make a new bucket in 'us-east-1' (destination bucket). + err = c.MakeBucket(context.Background(), bucketName+"-copy", minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + defer cleanupBucket(bucketName+"-copy", c) + + // Generate 33K of data. + bufSize := dataFileMap["datafile-33-kB"] + reader := getDataReader("datafile-33-kB") + defer reader.Close() + + // PutObject to upload the object to the bucket, this object will have a Crc64NVME checksum applied + // by default since nothing was explicitly specified. 
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + // GetObject to obtain the eTag + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + objInfo, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat failed", err) + return + } + r.Close() + + // Copy source options + src := minio.CopySrcOptions{ + Bucket: bucketName, + Object: objectName, + MatchModifiedSince: time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC), + MatchETag: objInfo.ETag, + } + + tests := []struct { + csType minio.ChecksumType + cs wantChecksums + }{ + {csType: minio.ChecksumCRC64NVME, cs: wantChecksums{minio.ChecksumCRC64NVME: "iRtfQH3xflQ="}}, + {csType: minio.ChecksumCRC32C, cs: wantChecksums{minio.ChecksumCRC32C: "aHnJMw=="}}, + {csType: minio.ChecksumCRC32, cs: wantChecksums{minio.ChecksumCRC32: "tIZ8hA=="}}, + {csType: minio.ChecksumSHA1, cs: wantChecksums{minio.ChecksumSHA1: "6YIIbcWH1iLaCFqs5vwq5Rwvm+o="}}, + {csType: minio.ChecksumSHA256, cs: wantChecksums{minio.ChecksumSHA256: "GKeJTopbMGPs3h4fAw4oe0R2QnnmFVJeIWkqCkp28Yo="}}, + // In S3, all copied objects without checksums and specified destination checksum algorithms + // automatically gain a CRC-64NVME checksum algorithm. Use ChecksumNone for this case. 
+ {csType: minio.ChecksumNone, cs: wantChecksums{minio.ChecksumCRC64NVME: "iRtfQH3xflQ="}}, + } + + for _, test := range tests { + args := map[string]interface{}{} + args["srcOpts"] = src + args["section"] = "setup" + args["checksum"] = test.csType.String() + + // Copy destination options + bucketCopyName := bucketName + "-copy" + objectCopyName := objectName + "-copy-" + test.csType.String() + dst := minio.CopyDestOptions{ + Bucket: bucketCopyName, + Object: objectCopyName, + ReplaceMetadata: true, + } + if test.csType != minio.ChecksumNone { + // Request the server-side checksum on the copy. + // ChecksumNone is a flag to leave off the header + dst.ChecksumType = test.csType + } + args["destOpts"] = dst + + // Perform the Copy + args["section"] = "CopyObject" + _, err = c.CopyObject(context.Background(), dst, src) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObject failed", err) + return + } + + // Checksum verification + args["section"] = "HeadObject" + st, err := c.StatObject(context.Background(), bucketCopyName, objectCopyName, minio.StatObjectOptions{Checksum: true}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject failed", err) + return + } + if st.ChecksumMode != "FULL_OBJECT" { + logError(testName, function, args, startTime, "", "ChecksumMode want: FULL_OBJECT, got "+st.ChecksumMode, nil) + return + } + err = cmpChecksum(st, test.cs) + if err != nil { + logError(testName, function, args, startTime, "", "Checksum mismatch", err) + return + } + + logSuccess(testName, function, args, startTime) + } +} + +// Tests replacing an object with CopyObject and a new Checksum type +func testReplaceObjectWithChecksums() { + startTime := time.Now() + testName := getFuncName() + function := "CopyObjectWithChecksums(destination, source)" + args := map[string]interface{}{} + + c, err := NewClient(ClientConfig{CredsV2: true}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client 
object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + + // Make a new bucket in 'us-east-1' (source bucket). + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + defer cleanupBucket(bucketName, c) + + tests := []struct { + csType minio.ChecksumType + cs wantChecksums + }{ + {csType: minio.ChecksumCRC64NVME, cs: wantChecksums{minio.ChecksumCRC64NVME: "iRtfQH3xflQ="}}, + {csType: minio.ChecksumCRC32C, cs: wantChecksums{minio.ChecksumCRC32C: "aHnJMw=="}}, + {csType: minio.ChecksumCRC32, cs: wantChecksums{minio.ChecksumCRC32: "tIZ8hA=="}}, + {csType: minio.ChecksumSHA1, cs: wantChecksums{minio.ChecksumSHA1: "6YIIbcWH1iLaCFqs5vwq5Rwvm+o="}}, + {csType: minio.ChecksumSHA256, cs: wantChecksums{minio.ChecksumSHA256: "GKeJTopbMGPs3h4fAw4oe0R2QnnmFVJeIWkqCkp28Yo="}}, + // In S3, all copied objects without checksums and specified destination checksum algorithms + // automatically gain a CRC-64NVME checksum algorithm. Use ChecksumNone for this case. 
+ {csType: minio.ChecksumNone, cs: wantChecksums{minio.ChecksumCRC64NVME: "iRtfQH3xflQ="}}, + } + + for _, test := range tests { + args := map[string]interface{}{} + args["section"] = "setup" + args["destOpts"] = "" + args["checksum"] = test.csType.String() + + bufSize := dataFileMap["datafile-33-kB"] + reader := getDataReader("datafile-33-kB") + defer reader.Close() + + // PutObject to upload the object to the bucket + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + // GetObject to obtain the eTag + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + objInfo, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat failed", err) + return + } + r.Close() + + // Copy source options + src := minio.CopySrcOptions{ + Bucket: bucketName, + Object: objectName, + MatchModifiedSince: time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC), + MatchETag: objInfo.ETag, + } + + // Copy destination options, overwrite the existing object + dst := minio.CopyDestOptions{ + Bucket: bucketName, + Object: objectName, + // S3 requires that we send some new metadata otherwise it complains that the + // CopyObject is illegal. + UserMetadata: map[string]string{ + "TestMeta": objectName + "-meta-" + test.csType.String(), + }, + ReplaceMetadata: true, + } + if test.csType != minio.ChecksumNone { + // Request the server-side checksum on the copy. 
+ // ChecksumNone is a flag to leave off the header + dst.ChecksumType = test.csType + } + args["destOpts"] = dst + + // Perform the Copy + args["section"] = "CopyObject" + _, err = c.CopyObject(context.Background(), dst, src) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObject failed", err) + return + } + + // Checksum verification + args["section"] = "HeadObject" + st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{Checksum: true}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject failed", err) + return + } + if st.ChecksumMode != "FULL_OBJECT" { + logError(testName, function, args, startTime, "", "ChecksumMode want: FULL_OBJECT, got "+st.ChecksumMode, nil) + return + } + err = cmpChecksum(st, test.cs) + if err != nil { + logError(testName, function, args, startTime, "", "Checksum mismatch", err) + return + } + + logSuccess(testName, function, args, startTime) + } +} + func testComposeObjectErrorCasesWrapper(c *minio.Client) { // initialize logging params startTime := time.Now() @@ -8976,6 +9363,7 @@ func testEncryptedCopyObjectWrapper(c *minio.Client, bucketName string, sseSrc, testName := getFuncNameLoc(2) function := "CopyObject(destination, source)" args := map[string]interface{}{} + args["testName"] = testName var srcEncryption, dstEncryption encrypt.ServerSide // Make a new bucket in 'us-east-1' (source bucket). @@ -8990,8 +9378,19 @@ func testEncryptedCopyObjectWrapper(c *minio.Client, bucketName string, sseSrc, // 1. 
create an sse-c encrypted object to copy by uploading const srcSize = 1024 * 1024 buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 5MiB + + // Calculate the CRC32C checksum for the object + meta := map[string]string{} + h := minio.ChecksumCRC32C.Hasher() + h.Reset() + h.Write(buf) + meta[minio.ChecksumCRC32C.Key()] = base64.StdEncoding.EncodeToString(h.Sum(nil)) + _, err = c.PutObject(context.Background(), bucketName, "srcObject", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ ServerSideEncryption: sseSrc, + DisableMultipart: true, + DisableContentSha256: true, + UserMetadata: meta, }) if err != nil { logError(testName, function, args, startTime, "", "PutObject call failed", err) @@ -9028,7 +9427,7 @@ func testEncryptedCopyObjectWrapper(c *minio.Client, bucketName string, sseSrc, } // 3. get copied object and check if content is equal coreClient := minio.Core{Client: c} - reader, _, _, err := coreClient.GetObject(context.Background(), bucketName, "dstObject", minio.GetObjectOptions{ServerSideEncryption: dstEncryption}) + reader, oi, _, err := coreClient.GetObject(context.Background(), bucketName, "dstObject", minio.GetObjectOptions{ServerSideEncryption: dstEncryption, Checksum: true}) if err != nil { logError(testName, function, args, startTime, "", "GetObject failed", err) return @@ -9045,6 +9444,12 @@ func testEncryptedCopyObjectWrapper(c *minio.Client, bucketName string, sseSrc, } reader.Close() + err = cmpChecksum(oi, wantChecksums{minio.ChecksumCRC32C: "bSoobA=="}) + if err != nil { + logError(testName, function, args, startTime, "", "Checksum mismatch on dstObject", err) + return + } + // Test key rotation for source object in-place. 
var newSSE encrypt.ServerSide if sseSrc != nil && sseSrc.Type() == encrypt.SSEC { @@ -9068,7 +9473,7 @@ func testEncryptedCopyObjectWrapper(c *minio.Client, bucketName string, sseSrc, } // Get copied object and check if content is equal - reader, _, _, err = coreClient.GetObject(context.Background(), bucketName, "srcObject", minio.GetObjectOptions{ServerSideEncryption: newSSE}) + reader, oi, _, err = coreClient.GetObject(context.Background(), bucketName, "srcObject", minio.GetObjectOptions{ServerSideEncryption: newSSE, Checksum: true}) if err != nil { logError(testName, function, args, startTime, "", "GetObject failed", err) return @@ -9085,6 +9490,13 @@ func testEncryptedCopyObjectWrapper(c *minio.Client, bucketName string, sseSrc, } reader.Close() + err = cmpChecksum(oi, wantChecksums{minio.ChecksumCRC32C: "bSoobA=="}) + if err != nil { + fmt.Printf("srcObject objectInfo: %+v\n", oi) + logError(testName, function, args, startTime, "", "Checksum mismatch on srcObject for in-place", err) + return + } + // Test in-place decryption. 
dst = minio.CopyDestOptions{ Bucket: bucketName, @@ -9106,7 +9518,7 @@ func testEncryptedCopyObjectWrapper(c *minio.Client, bucketName string, sseSrc, } // Get copied decrypted object and check if content is equal - reader, _, _, err = coreClient.GetObject(context.Background(), bucketName, "srcObject", minio.GetObjectOptions{}) + reader, oi, _, err = coreClient.GetObject(context.Background(), bucketName, "srcObject", minio.GetObjectOptions{Checksum: true}) if err != nil { logError(testName, function, args, startTime, "", "GetObject failed", err) return @@ -9123,6 +9535,12 @@ func testEncryptedCopyObjectWrapper(c *minio.Client, bucketName string, sseSrc, return } + err = cmpChecksum(oi, wantChecksums{minio.ChecksumCRC32C: "bSoobA=="}) + if err != nil { + logError(testName, function, args, startTime, "", "Checksum mismatch for decrypted object", err) + return + } + logSuccess(testName, function, args, startTime) } @@ -9134,7 +9552,7 @@ func testUnencryptedToSSECCopyObject() { function := "CopyObject(destination, source)" args := map[string]interface{}{} - c, err := NewClient(ClientConfig{}) + c, err := NewClient(ClientConfig{TrailingHeaders: true}) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return @@ -9154,7 +9572,7 @@ func testUnencryptedToSSES3CopyObject() { function := "CopyObject(destination, source)" args := map[string]interface{}{} - c, err := NewClient(ClientConfig{}) + c, err := NewClient(ClientConfig{TrailingHeaders: true}) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return @@ -9175,7 +9593,7 @@ func testUnencryptedToUnencryptedCopyObject() { function := "CopyObject(destination, source)" args := map[string]interface{}{} - c, err := NewClient(ClientConfig{}) + c, err := NewClient(ClientConfig{TrailingHeaders: true}) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", 
err) return @@ -9195,7 +9613,7 @@ func testEncryptedSSECToSSECCopyObject() { function := "CopyObject(destination, source)" args := map[string]interface{}{} - c, err := NewClient(ClientConfig{}) + c, err := NewClient(ClientConfig{TrailingHeaders: true}) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return @@ -9216,7 +9634,7 @@ func testEncryptedSSECToSSES3CopyObject() { function := "CopyObject(destination, source)" args := map[string]interface{}{} - c, err := NewClient(ClientConfig{}) + c, err := NewClient(ClientConfig{TrailingHeaders: true}) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return @@ -9237,7 +9655,7 @@ func testEncryptedSSECToUnencryptedCopyObject() { function := "CopyObject(destination, source)" args := map[string]interface{}{} - c, err := NewClient(ClientConfig{}) + c, err := NewClient(ClientConfig{TrailingHeaders: true}) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return @@ -9258,7 +9676,7 @@ func testEncryptedSSES3ToSSECCopyObject() { function := "CopyObject(destination, source)" args := map[string]interface{}{} - c, err := NewClient(ClientConfig{}) + c, err := NewClient(ClientConfig{TrailingHeaders: true}) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return @@ -9279,7 +9697,7 @@ func testEncryptedSSES3ToSSES3CopyObject() { function := "CopyObject(destination, source)" args := map[string]interface{}{} - c, err := NewClient(ClientConfig{}) + c, err := NewClient(ClientConfig{TrailingHeaders: true}) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return @@ -9300,7 +9718,7 @@ func testEncryptedSSES3ToUnencryptedCopyObject() { function := "CopyObject(destination, source)" args := map[string]interface{}{} - c, err := 
NewClient(ClientConfig{}) + c, err := NewClient(ClientConfig{TrailingHeaders: true}) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return @@ -9321,7 +9739,7 @@ func testEncryptedCopyObjectV2() { function := "CopyObject(destination, source)" args := map[string]interface{}{} - c, err := NewClient(ClientConfig{CredsV2: true}) + c, err := NewClient(ClientConfig{CredsV2: true, TrailingHeaders: true}) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return @@ -10080,7 +10498,7 @@ func testUnencryptedToSSECCopyObjectPart() { function := "CopyObjectPart(destination, source)" args := map[string]interface{}{} - client, err := NewClient(ClientConfig{}) + client, err := NewClient(ClientConfig{TrailingHeaders: true}) if err != nil { logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) return @@ -11080,7 +11498,7 @@ func testUserMetadataCopyingWrapper(c *minio.Client) { objInfo, err := c.StatObject(context.Background(), bucketName, object, minio.StatObjectOptions{}) if err != nil { logError(testName, function, args, startTime, "", "Stat failed", err) - return + return h } h = make(http.Header) for k, vs := range objInfo.Metadata { @@ -11256,7 +11674,7 @@ func testStorageClassMetadataPutObject() { objInfo, err := c.StatObject(context.Background(), bucketName, object, minio.StatObjectOptions{}) if err != nil { logError(testName, function, args, startTime, "", "Stat failed", err) - return + return h } h = make(http.Header) for k, vs := range objInfo.Metadata { @@ -11377,7 +11795,7 @@ func testStorageClassMetadataCopyObject() { args["object"] = object if err != nil { logError(testName, function, args, startTime, "", "Stat failed", err) - return + return h } h = make(http.Header) for k, vs := range objInfo.Metadata { @@ -14195,6 +14613,29 @@ func mustParseBool(str string) bool { return b } +// wantChecksums is a 
map of expected checksums for an object. +type wantChecksums map[minio.ChecksumType]string + +// cmpChecksum compares the checksums of an object against expected values. +func cmpChecksum(oi minio.ObjectInfo, chksums wantChecksums) error { + if oi.ChecksumCRC64NVME != chksums[minio.ChecksumCRC64NVME] { + return fmt.Errorf("Checksum mismatch for CRC64NVME, want: %s, got: %s", chksums[minio.ChecksumCRC64NVME], oi.ChecksumCRC64NVME) + } + if oi.ChecksumCRC32C != chksums[minio.ChecksumCRC32C] { + return fmt.Errorf("Checksum mismatch for CRC32C, want: %s, got: %s", chksums[minio.ChecksumCRC32C], oi.ChecksumCRC32C) + } + if oi.ChecksumCRC32 != chksums[minio.ChecksumCRC32] { + return fmt.Errorf("Checksum mismatch for CRC32, want: %s, got: %s", chksums[minio.ChecksumCRC32], oi.ChecksumCRC32) + } + if oi.ChecksumSHA1 != chksums[minio.ChecksumSHA1] { + return fmt.Errorf("Checksum mismatch for SHA1, want: %s, got: %s", chksums[minio.ChecksumSHA1], oi.ChecksumSHA1) + } + if oi.ChecksumSHA256 != chksums[minio.ChecksumSHA256] { + return fmt.Errorf("Checksum mismatch for SHA256, want: %s, got: %s", chksums[minio.ChecksumSHA256], oi.ChecksumSHA256) + } + return nil +} + func main() { slog.SetDefault(slog.New(slog.NewJSONHandler( os.Stdout, @@ -14219,6 +14660,8 @@ func main() { // execute tests if isFullMode() { + testCopyObjectWithChecksums() + testReplaceObjectWithChecksums() testCorsSetGetDelete() testCors() testListMultipartUpload() @@ -14253,6 +14696,7 @@ func main() { testPutObjectWithMetadata() testPutObjectReadAt() testPutObjectStreaming() + testPutObjectPreconditionOnNonExistent() testGetObjectSeekEnd() testGetObjectClosedTwice() testGetObjectS3Zip() diff --git a/vendor/github.com/minio/minio-go/v7/internal/json/json_goccy.go b/vendor/github.com/minio/minio-go/v7/internal/json/json_goccy.go deleted file mode 100644 index 8fc33849f66..00000000000 --- a/vendor/github.com/minio/minio-go/v7/internal/json/json_goccy.go +++ /dev/null @@ -1,49 +0,0 @@ -//go:build !stdlibjson - 
-/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2025 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package json - -import "github.com/goccy/go-json" - -// This file defines the JSON functions used internally and forwards them -// to goccy/go-json. Alternatively, the standard library can be used by setting -// the build tag stdlibjson. This can be useful for testing purposes or if -// goccy/go-json causes issues. -// -// This file does not contain all definitions from goccy/go-json; if needed, more -// can be added, but keep in mind that json_stdlib.go will also need to be -// updated. - -var ( - // Unmarshal is a wrapper around goccy/go-json Unmarshal function. - Unmarshal = json.Unmarshal - // Marshal is a wrapper around goccy/go-json Marshal function. - Marshal = json.Marshal - // NewEncoder is a wrapper around goccy/go-json NewEncoder function. - NewEncoder = json.NewEncoder - // NewDecoder is a wrapper around goccy/go-json NewDecoder function. - NewDecoder = json.NewDecoder -) - -type ( - // Encoder is an alias for goccy/go-json Encoder. - Encoder = json.Encoder - // Decoder is an alias for goccy/go-json Decoder. 
- Decoder = json.Decoder -) diff --git a/vendor/github.com/minio/minio-go/v7/internal/json/json_stdlib.go b/vendor/github.com/minio/minio-go/v7/internal/json/json_stdlib.go deleted file mode 100644 index a671fead313..00000000000 --- a/vendor/github.com/minio/minio-go/v7/internal/json/json_stdlib.go +++ /dev/null @@ -1,49 +0,0 @@ -//go:build stdlibjson - -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2025 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package json - -import "encoding/json" - -// This file defines the JSON functions used internally and forwards them -// to encoding/json. This is only enabled by setting the build tag stdlibjson, -// otherwise json_goccy.go applies. -// This can be useful for testing purposes or if goccy/go-json (which is used otherwise) causes issues. -// -// This file does not contain all definitions from encoding/json; if needed, more -// can be added, but keep in mind that json_goccy.go will also need to be -// updated. - -var ( - // Unmarshal is a wrapper around encoding/json Unmarshal function. - Unmarshal = json.Unmarshal - // Marshal is a wrapper around encoding/json Marshal function. - Marshal = json.Marshal - // NewEncoder is a wrapper around encoding/json NewEncoder function. - NewEncoder = json.NewEncoder - // NewDecoder is a wrapper around encoding/json NewDecoder function. 
- NewDecoder = json.NewDecoder -) - -type ( - // Encoder is an alias for encoding/json Encoder. - Encoder = json.Encoder - // Decoder is an alias for encoding/json Decoder. - Decoder = json.Decoder -) diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_aws_credentials.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_aws_credentials.go index c9a52252a44..0c83fc7fa4c 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_aws_credentials.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_aws_credentials.go @@ -18,6 +18,7 @@ package credentials import ( + "encoding/json" "errors" "os" "os/exec" @@ -26,7 +27,6 @@ import ( "time" "github.com/go-ini/ini" - "github.com/minio/minio-go/v7/internal/json" ) // A externalProcessCredentials stores the output of a credential_process diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_minio_client.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_minio_client.go index 398952ee98b..b78dcaccf8f 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_minio_client.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_minio_client.go @@ -18,11 +18,10 @@ package credentials import ( + "encoding/json" "os" "path/filepath" "runtime" - - "github.com/minio/minio-go/v7/internal/json" ) // A FileMinioClient retrieves credentials from the current user's home diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go index edc98846792..f4f7c8f7e29 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go @@ -20,6 +20,7 @@ package credentials import ( "bufio" "context" + "encoding/json" "errors" "fmt" "io" @@ -30,8 +31,6 @@ import ( "path" "strings" "time" - - "github.com/minio/minio-go/v7/internal/json" ) // DefaultExpiryWindow - Default expiry window. 
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_custom_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_custom_identity.go index 162f460eea5..e9e7a1151f1 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_custom_identity.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_custom_identity.go @@ -132,7 +132,7 @@ func (c *CustomTokenIdentity) RetrieveWithCredContext(cc *CredContext) (value Va r := AssumeRoleWithCustomTokenResponse{} if err = xml.NewDecoder(resp.Body).Decode(&r); err != nil { - return + return value, err } cr := r.Result.Credentials diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go index 31fe10ae039..7e80cd6a2ac 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go @@ -74,6 +74,9 @@ type LDAPIdentity struct { // requested from LDAP. RequestedExpiry time.Duration + // Optional, if empty applies to default config + ConfigName string + // Optional, used for token revokation TokenRevokeType string } @@ -110,6 +113,13 @@ func LDAPIdentityExpiryOpt(d time.Duration) LDAPIdentityOpt { } } +// LDAPIdentityConfigNameOpt sets the config name for requested credentials. +func LDAPIdentityConfigNameOpt(name string) LDAPIdentityOpt { + return func(k *LDAPIdentity) { + k.ConfigName = name + } +} + // NewLDAPIdentityWithSessionPolicy returns new credentials object that uses // LDAP Identity with a specified session policy. The `policy` parameter must be // a JSON string specifying the policy document. 
@@ -158,6 +168,9 @@ func (k *LDAPIdentity) RetrieveWithCredContext(cc *CredContext) (value Value, er if k.TokenRevokeType != "" { v.Set("TokenRevokeType", k.TokenRevokeType) } + if k.ConfigName != "" { + v.Set("ConfigName", k.ConfigName) + } req, err := http.NewRequest(http.MethodPost, u.String(), strings.NewReader(v.Encode())) if err != nil { @@ -201,7 +214,7 @@ func (k *LDAPIdentity) RetrieveWithCredContext(cc *CredContext) (value Value, er r := AssumeRoleWithLDAPResponse{} if err = xml.NewDecoder(resp.Body).Decode(&r); err != nil { - return + return value, err } cr := r.Result.Credentials diff --git a/vendor/github.com/minio/minio-go/v7/pkg/encrypt/server-side.go b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/server-side.go index 1fc510ae069..0a8a7baa20d 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/encrypt/server-side.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/server-side.go @@ -20,10 +20,10 @@ package encrypt import ( "crypto/md5" "encoding/base64" + "encoding/json" "errors" "net/http" - "github.com/minio/minio-go/v7/internal/json" "golang.org/x/crypto/argon2" ) diff --git a/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go b/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go index cf1ba038f74..7ed98b0d133 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go @@ -19,11 +19,10 @@ package lifecycle import ( + "encoding/json" "encoding/xml" "errors" "time" - - "github.com/minio/minio-go/v7/internal/json" ) var errMissingStorageClass = errors.New("storage-class cannot be empty") diff --git a/vendor/github.com/minio/minio-go/v7/pkg/peeker/peek-reader-closer.go b/vendor/github.com/minio/minio-go/v7/pkg/peeker/peek-reader-closer.go deleted file mode 100644 index 26c9cf6377a..00000000000 --- a/vendor/github.com/minio/minio-go/v7/pkg/peeker/peek-reader-closer.go +++ /dev/null @@ -1,73 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 
Compatible Cloud Storage - * Copyright 2015-2025 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package peeker - -import ( - "bytes" - "errors" - "io" -) - -// PeekReadCloser offers a way to peek a ReadCloser stream and then -// return the exact stream of the underlying ReadCloser -type PeekReadCloser struct { - io.ReadCloser - - recordMode bool - recordMaxBuf int - recordBuf *bytes.Buffer -} - -// ReplayFromStart ensures next Read() will restart to stream the -// underlying ReadCloser stream from the beginning -func (prc *PeekReadCloser) ReplayFromStart() { - prc.recordMode = false -} - -func (prc *PeekReadCloser) Read(p []byte) (int, error) { - if prc.recordMode { - if prc.recordBuf.Len() > prc.recordMaxBuf { - return 0, errors.New("maximum peek buffer exceeded") - } - n, err := prc.ReadCloser.Read(p) - prc.recordBuf.Write(p[:n]) - return n, err - } - // Replay mode - if prc.recordBuf.Len() > 0 { - pn, _ := prc.recordBuf.Read(p) - return pn, nil - } - return prc.ReadCloser.Read(p) -} - -// Close releases the record buffer memory and close the underlying ReadCloser -func (prc *PeekReadCloser) Close() error { - prc.recordBuf.Reset() - return prc.ReadCloser.Close() -} - -// NewPeekReadCloser returns a new peek reader -func NewPeekReadCloser(rc io.ReadCloser, maxBufSize int) *PeekReadCloser { - return &PeekReadCloser{ - ReadCloser: rc, - recordMode: true, // recording mode by default - recordBuf: bytes.NewBuffer(make([]byte, 0, 1024)), - 
recordMaxBuf: maxBufSize, - } -} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go b/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go index 2f7993f4b49..cc17a3531f8 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go @@ -1014,7 +1014,7 @@ func (q ReplQueueStats) QStats() (r ReplQStats) { if len(q.Nodes) > 0 { r.Uptime /= int64(len(q.Nodes)) // average uptime } - return + return r } // MetricsV2 represents replication metrics for a bucket. diff --git a/vendor/github.com/minio/minio-go/v7/pkg/set/stringset.go b/vendor/github.com/minio/minio-go/v7/pkg/set/stringset.go index 8aa92212b9f..c12651b5443 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/set/stringset.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/set/stringset.go @@ -18,10 +18,9 @@ package set import ( + "encoding/json" "fmt" "sort" - - "github.com/minio/minio-go/v7/internal/json" ) // StringSet - uses map as set of strings. diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v2.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v2.go index f65c36c7d3d..d15c99ad78d 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v2.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v2.go @@ -48,11 +48,11 @@ func encodeURL2Path(req *http.Request, virtualHost bool) (path string) { path = "/" + bucketName path += req.URL.Path path = s3utils.EncodePath(path) - return + return path } } path = s3utils.EncodePath(req.URL.Path) - return + return path } // PreSignV2 - presign the request in following style. 
diff --git a/vendor/github.com/minio/minio-go/v7/utils.go b/vendor/github.com/minio/minio-go/v7/utils.go index cc96005b9b9..6c8dc943393 100644 --- a/vendor/github.com/minio/minio-go/v7/utils.go +++ b/vendor/github.com/minio/minio-go/v7/utils.go @@ -80,7 +80,7 @@ func amzRestoreToStruct(restore string) (ongoing bool, expTime time.Time, err er return false, time.Time{}, err } } - return + return ongoing, expTime, err } // xmlDecoder provide decoded value in xml. @@ -438,7 +438,7 @@ var readFull = func(r io.Reader, buf []byte) (n int, err error) { } else if n > 0 && err == io.EOF { err = io.ErrUnexpectedEOF } - return + return n, err } // regCred matches credential string in HTTP header diff --git a/vendor/github.com/nats-io/nats-server/v2/server/accounts.go b/vendor/github.com/nats-io/nats-server/v2/server/accounts.go index a9df6e0365a..548f6943ec4 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/accounts.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/accounts.go @@ -3794,7 +3794,7 @@ func (s *Server) updateAccountClaimsWithRefresh(a *Account, ac *jwt.AccountClaim // If JetStream is enabled for this server we will call into configJetStream for the account // regardless of enabled or disabled. It handles both cases. 
if jsEnabled { - if err := s.configJetStream(a); err != nil { + if err := s.configJetStream(a, nil); err != nil { s.Errorf("Error configuring jetstream for account [%s]: %v", tl, err.Error()) a.mu.Lock() // Absent reload of js server cfg, this is going to be broken until js is disabled @@ -4371,7 +4371,7 @@ func (dr *DirAccResolver) Start(s *Server) error { s.Warnf("DirResolver - Error checking for JetStream support for account %q: %v", pubKey, err) } } else if jsa == nil { - if err = s.configJetStream(acc); err != nil { + if err = s.configJetStream(acc, nil); err != nil { s.Errorf("DirResolver - Error configuring JetStream for account %q: %v", pubKey, err) } } diff --git a/vendor/github.com/nats-io/nats-server/v2/server/ats/ats.go b/vendor/github.com/nats-io/nats-server/v2/server/ats/ats.go index c310ccbd078..50045bfd57f 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/ats/ats.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/ats/ats.go @@ -77,7 +77,10 @@ func AccessTime() int64 { // Return last updated time. v := utime.Load() if v == 0 { - panic("access time service not running") + // Always register a time, the worst case is a stale time. + // On startup, we can register in parallel and could previously panic. + v = time.Now().UnixNano() + utime.Store(v) } return v } diff --git a/vendor/github.com/nats-io/nats-server/v2/server/auth.go b/vendor/github.com/nats-io/nats-server/v2/server/auth.go index 2d735c0e2f6..ede297441b4 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/auth.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/auth.go @@ -1123,7 +1123,8 @@ func (s *Server) processClientOrLeafAuthentication(c *client, opts *Options) (au return ok } - if c.kind == CLIENT { + // Check for the use of simple auth. 
+ if c.kind == CLIENT || c.kind == LEAF { if proxyRequired = opts.ProxyRequired; proxyRequired && !trustedProxy { return setProxyAuthError(ErrAuthProxyRequired) } diff --git a/vendor/github.com/nats-io/nats-server/v2/server/client.go b/vendor/github.com/nats-io/nats-server/v2/server/client.go index b7ef2ba60b5..135df35827a 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/client.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/client.go @@ -237,6 +237,26 @@ const ( pmrMsgImportedFromService ) +type WriteTimeoutPolicy uint8 + +const ( + WriteTimeoutPolicyDefault = iota + WriteTimeoutPolicyClose + WriteTimeoutPolicyRetry +) + +// String returns a human-friendly value. Only used in varz. +func (p WriteTimeoutPolicy) String() string { + switch p { + case WriteTimeoutPolicyClose: + return "close" + case WriteTimeoutPolicyRetry: + return "retry" + default: + return _EMPTY_ + } +} + type client struct { // Here first because of use of atomics, and memory alignment. stats @@ -328,15 +348,16 @@ type pinfo struct { // outbound holds pending data for a socket. type outbound struct { - nb net.Buffers // Pending buffers for send, each has fixed capacity as per nbPool below. - wnb net.Buffers // Working copy of "nb", reused on each flushOutbound call, partial writes may leave entries here for next iteration. - pb int64 // Total pending/queued bytes. - fsp int32 // Flush signals that are pending per producer from readLoop's pcd. - sg *sync.Cond // To signal writeLoop that there is data to flush. - wdl time.Duration // Snapshot of write deadline. - mp int64 // Snapshot of max pending for client. - lft time.Duration // Last flush time for Write. - stc chan struct{} // Stall chan we create to slow down producers on overrun, e.g. fan-in. + nb net.Buffers // Pending buffers for send, each has fixed capacity as per nbPool below. + wnb net.Buffers // Working copy of "nb", reused on each flushOutbound call, partial writes may leave entries here for next iteration. 
+ pb int64 // Total pending/queued bytes. + fsp int32 // Flush signals that are pending per producer from readLoop's pcd. + wtp WriteTimeoutPolicy // What do we do on a write timeout? + sg *sync.Cond // To signal writeLoop that there is data to flush. + wdl time.Duration // Snapshot of write deadline. + mp int64 // Snapshot of max pending for client. + lft time.Duration // Last flush time for Write. + stc chan struct{} // Stall chan we create to slow down producers on overrun, e.g. fan-in. cw *s2.Writer } @@ -698,6 +719,24 @@ func (c *client) initClient() { case c.kind == LEAF && opts.LeafNode.WriteDeadline > 0: c.out.wdl = opts.LeafNode.WriteDeadline } + switch c.kind { + case ROUTER: + if c.out.wtp = opts.Cluster.WriteTimeout; c.out.wtp == WriteTimeoutPolicyDefault { + c.out.wtp = WriteTimeoutPolicyRetry + } + case LEAF: + if c.out.wtp = opts.LeafNode.WriteTimeout; c.out.wtp == WriteTimeoutPolicyDefault { + c.out.wtp = WriteTimeoutPolicyRetry + } + case GATEWAY: + if c.out.wtp = opts.Gateway.WriteTimeout; c.out.wtp == WriteTimeoutPolicyDefault { + c.out.wtp = WriteTimeoutPolicyRetry + } + default: + if c.out.wtp = opts.WriteTimeout; c.out.wtp == WriteTimeoutPolicyDefault { + c.out.wtp = WriteTimeoutPolicyClose + } + } c.out.mp = opts.MaxPending // Snapshot max control line since currently can not be changed on reload and we // were checking it on each call to parse. If this changes and we allow MaxControlLine @@ -1849,7 +1888,7 @@ func (c *client) handleWriteTimeout(written, attempted int64, numChunks int) boo scState, c.out.wdl, numChunks, attempted) // We always close CLIENT connections, or when nothing was written at all... - if c.kind == CLIENT || written == 0 { + if c.out.wtp == WriteTimeoutPolicyClose || written == 0 { c.markConnAsClosed(SlowConsumerWriteDeadline) return true } else { @@ -2548,9 +2587,11 @@ func (c *client) sendPing() { // Generates the INFO to be sent to the client with the client ID included. 
// info arg will be copied since passed by value. // Assume lock is held. -func (c *client) generateClientInfoJSON(info Info) []byte { +func (c *client) generateClientInfoJSON(info Info, includeClientIP bool) []byte { info.CID = c.cid - info.ClientIP = c.host + if includeClientIP { + info.ClientIP = c.host + } info.MaxPayload = c.mpay if c.isWebsocket() { info.ClientConnectURLs = info.WSConnectURLs @@ -2631,7 +2672,7 @@ func (c *client) processPing() { info.RemoteAccount = c.acc.Name info.IsSystemAccount = c.acc == srv.SystemAccount() info.ConnectInfo = true - c.enqueueProto(c.generateClientInfoJSON(info)) + c.enqueueProto(c.generateClientInfoJSON(info, true)) c.mu.Unlock() srv.mu.Unlock() } @@ -2981,7 +3022,7 @@ func (c *client) processSubEx(subject, queue, bsid []byte, cb msgHandler, noForw return sub, nil } - if err := c.addShadowSubscriptions(acc, sub, true); err != nil { + if err := c.addShadowSubscriptions(acc, sub); err != nil { c.Errorf(err.Error()) } @@ -3011,10 +3052,7 @@ type ime struct { // If the client's account has stream imports and there are matches for this // subscription's subject, then add shadow subscriptions in the other accounts // that export this subject. -// -// enact=false allows MQTT clients to get the list of shadow subscriptions -// without enacting them, in order to first obtain matching "retained" messages. -func (c *client) addShadowSubscriptions(acc *Account, sub *subscription, enact bool) error { +func (c *client) addShadowSubscriptions(acc *Account, sub *subscription) error { if acc == nil { return ErrMissingAccount } @@ -3117,7 +3155,7 @@ func (c *client) addShadowSubscriptions(acc *Account, sub *subscription, enact b for i := 0; i < len(ims); i++ { ime := &ims[i] // We will create a shadow subscription. 
- nsub, err := c.addShadowSub(sub, ime, enact) + nsub, err := c.addShadowSub(sub, ime) if err != nil { return err } @@ -3134,7 +3172,7 @@ func (c *client) addShadowSubscriptions(acc *Account, sub *subscription, enact b } // Add in the shadow subscription. -func (c *client) addShadowSub(sub *subscription, ime *ime, enact bool) (*subscription, error) { +func (c *client) addShadowSub(sub *subscription, ime *ime) (*subscription, error) { c.mu.Lock() nsub := *sub // copy c.mu.Unlock() @@ -3162,10 +3200,6 @@ func (c *client) addShadowSub(sub *subscription, ime *ime, enact bool) (*subscri } // Else use original subject - if !enact { - return &nsub, nil - } - c.Debugf("Creating import subscription on %q from account %q", nsub.subject, im.acc.Name) if err := im.acc.sl.Insert(&nsub); err != nil { @@ -3196,7 +3230,7 @@ func (c *client) canSubscribe(subject string, optQueue ...string) bool { return true } - allowed := true + allowed, checkAllow := true, true // Optional queue group. var queue string @@ -3204,8 +3238,14 @@ func (c *client) canSubscribe(subject string, optQueue ...string) bool { queue = optQueue[0] } + // For CLIENT connections that are MQTT, or other types of connections, we will + // implicitly allow anything that starts with the "$MQTT." prefix. However, + // we don't just return here, we skip the check for "allow" but will check "deny". + if (c.isMqtt() || (c.kind != CLIENT)) && strings.HasPrefix(subject, mqttPrefix) { + checkAllow = false + } // Check allow list. If no allow list that means all are allowed. Deny can overrule. 
- if c.perms.sub.allow != nil { + if checkAllow && c.perms.sub.allow != nil { r := c.perms.sub.allow.Match(subject) allowed = len(r.psubs) > 0 if queue != _EMPTY_ && len(r.qsubs) > 0 { @@ -4022,9 +4062,15 @@ func (c *client) pubAllowedFullCheck(subject string, fullCheck, hasLock bool) bo if ok { return v.(bool) } - allowed := true + allowed, checkAllow := true, true + // For CLIENT connections that are MQTT, or other types of connections, we will + // implicitly allow anything that starts with the "$MQTT." prefix. However, + // we don't just return here, we skip the check for "allow" but will check "deny". + if (c.isMqtt() || c.kind != CLIENT) && strings.HasPrefix(subject, mqttPrefix) { + checkAllow = false + } // Cache miss, check allow then deny as needed. - if c.perms.pub.allow != nil { + if checkAllow && c.perms.pub.allow != nil { np, _ := c.perms.pub.allow.NumInterest(subject) allowed = np != 0 } @@ -4345,7 +4391,7 @@ func (c *client) setupResponseServiceImport(acc *Account, si *serviceImport, tra // Will remove a header if present. func removeHeaderIfPresent(hdr []byte, key string) []byte { - start := bytes.Index(hdr, []byte(key+":")) + start := getHeaderKeyIndex(key, hdr) // key can't be first and we want to check that it is preceded by a '\n' if start < 1 || hdr[start-1] != '\n' { return hdr @@ -4463,22 +4509,13 @@ func sliceHeader(key string, hdr []byte) []byte { if len(hdr) == 0 { return nil } - index := bytes.Index(hdr, stringToBytes(key+":")) - hdrLen := len(hdr) - // Check that we have enough characters, this will handle the -1 case of the key not - // being found and will also handle not having enough characters for trailing CRLF. - if index < 2 { + index := getHeaderKeyIndex(key, hdr) + if index == -1 { return nil } - // There should be a terminating CRLF. - if index >= hdrLen-1 || hdr[index-1] != '\n' || hdr[index-2] != '\r' { - return nil - } - // The key should be immediately followed by a : separator. + // Skip over the key and the : separator. 
index += len(key) + 1 - if index >= hdrLen || hdr[index-1] != ':' { - return nil - } + hdrLen := len(hdr) // Skip over whitespace before the value. for index < hdrLen && hdr[index] == ' ' { index++ @@ -4494,11 +4531,49 @@ func sliceHeader(key string, hdr []byte) []byte { return hdr[start:index:index] } +// getHeaderKeyIndex returns an index into the header slice for the given key. +// Returns -1 if not found. +func getHeaderKeyIndex(key string, hdr []byte) int { + if len(hdr) == 0 { + return -1 + } + bkey := stringToBytes(key) + keyLen, hdrLen := len(key), len(hdr) + var offset int + for { + index := bytes.Index(hdr[offset:], bkey) + // Check that we have enough characters, this will handle the -1 case of the key not + // being found and will also handle not having enough characters for trailing CRLF. + if index < 2 { + return -1 + } + index += offset + // There should be a terminating CRLF. + if index >= hdrLen-1 || hdr[index-1] != '\n' || hdr[index-2] != '\r' { + offset = index + keyLen + continue + } + // The key should be immediately followed by a : separator. + if index+keyLen >= hdrLen { + return -1 + } + if hdr[index+keyLen] != ':' { + offset = index + keyLen + continue + } + return index + } +} + func setHeader(key, val string, hdr []byte) []byte { - prefix := []byte(key + ": ") - start := bytes.Index(hdr, prefix) + start := getHeaderKeyIndex(key, hdr) if start >= 0 { - valStart := start + len(prefix) + valStart := start + len(key) + 1 + // Preserve single whitespace if used. + hdrLen := len(hdr) + if valStart < hdrLen && hdr[valStart] == ' ' { + valStart++ + } valEnd := bytes.Index(hdr[valStart:], []byte("\r")) if valEnd < 0 { return hdr // malformed headers @@ -5271,8 +5346,10 @@ sendToRoutesOrLeafs: // If we do have a deliver subject we need to do something with it. // Again this is when JetStream (but possibly others) wants the system // to rewrite the delivered subject. 
The way we will do that is place it - // at the end of the reply subject if it exists. - if len(deliver) > 0 && len(reply) > 0 { + // at the end of the reply subject if it exists. But only if this wasn't + // already performed, otherwise we'd end up with a duplicate '@' suffix + // resulting in a protocol error. + if len(deliver) > 0 && len(reply) > 0 && !remapped { reply = append(reply, '@') reply = append(reply, deliver...) } @@ -5431,6 +5508,9 @@ func (c *client) processPingTimer() { if c.kind == ROUTER && opts.Cluster.PingInterval > 0 { pingInterval = opts.Cluster.PingInterval } + if c.isWebsocket() && opts.Websocket.PingInterval > 0 { + pingInterval = opts.Websocket.PingInterval + } pingInterval = adjustPingInterval(c.kind, pingInterval) now := time.Now() needRTT := c.rtt == 0 || now.Sub(c.rttStart) > DEFAULT_RTT_MEASUREMENT_INTERVAL @@ -5513,6 +5593,9 @@ func (c *client) setPingTimer() { if c.kind == ROUTER && opts.Cluster.PingInterval > 0 { d = opts.Cluster.PingInterval } + if c.isWebsocket() && opts.Websocket.PingInterval > 0 { + d = opts.Websocket.PingInterval + } d = adjustPingInterval(c.kind, d) c.ping.tmr = time.AfterFunc(d, c.processPingTimer) } @@ -5718,7 +5801,7 @@ func (c *client) processSubsOnConfigReload(awcsti map[string]struct{}) { oldShadows := sub.shadow sub.shadow = nil c.mu.Unlock() - c.addShadowSubscriptions(acc, sub, true) + c.addShadowSubscriptions(acc, sub) for _, nsub := range oldShadows { nsub.im.acc.sl.Remove(nsub) } @@ -5766,7 +5849,8 @@ func (c *client) closeConnection(reason ClosedState) { } // If we are shutting down, no need to do all the accounting on subs, etc. - if reason == ServerShutdown { + // During LDM we'll still do the accounting, otherwise account limits could close others after this reconnects. 
+ if reason == ServerShutdown && c.srv.isShuttingDown() { s := c.srv c.mu.Unlock() if s != nil { @@ -6546,6 +6630,9 @@ func (c *client) setFirstPingTimer() { if c.kind == ROUTER && opts.Cluster.PingInterval > 0 { d = opts.Cluster.PingInterval } + if c.isWebsocket() && opts.Websocket.PingInterval > 0 { + d = opts.Websocket.PingInterval + } if !opts.DisableShortFirstPing { if c.kind != CLIENT { if d > firstPingInterval { diff --git a/vendor/github.com/nats-io/nats-server/v2/server/client_proxyproto.go b/vendor/github.com/nats-io/nats-server/v2/server/client_proxyproto.go new file mode 100644 index 00000000000..cdfbda7609b --- /dev/null +++ b/vendor/github.com/nats-io/nats-server/v2/server/client_proxyproto.go @@ -0,0 +1,398 @@ +// Copyright 2025 The NATS Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package server + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + "net" + "strconv" + "strings" + "time" +) + +// PROXY protocol v2 constants +const ( + // Protocol signature (12 bytes) + proxyProtoV2Sig = "\x0D\x0A\x0D\x0A\x00\x0D\x0A\x51\x55\x49\x54\x0A" + + // Version and command byte format: version(4 bits) | command(4 bits) + proxyProtoV2VerMask = 0xF0 + proxyProtoV2Ver = 0x20 // Version 2 + + // Commands + proxyProtoCmdMask = 0x0F + proxyProtoCmdLocal = 0x00 // LOCAL command (health check, use original connection) + proxyProtoCmdProxy = 0x01 // PROXY command (proxied connection) + + // Address family and protocol byte format: family(4 bits) | protocol(4 bits) + proxyProtoFamilyMask = 0xF0 + proxyProtoFamilyUnspec = 0x00 // Unspecified + proxyProtoFamilyInet = 0x10 // IPv4 + proxyProtoFamilyInet6 = 0x20 // IPv6 + proxyProtoFamilyUnix = 0x30 // Unix socket + proxyProtoProtoMask = 0x0F + proxyProtoProtoUnspec = 0x00 // Unspecified + proxyProtoProtoStream = 0x01 // TCP/STREAM + proxyProtoProtoDatagram = 0x02 // UDP/DGRAM + + // Address sizes + proxyProtoAddrSizeIPv4 = 12 // 4 (src IP) + 4 (dst IP) + 2 (src port) + 2 (dst port) + proxyProtoAddrSizeIPv6 = 36 // 16 (src IP) + 16 (dst IP) + 2 (src port) + 2 (dst port) + + // Header sizes + proxyProtoV2HeaderSize = 16 // Fixed header: 12 (sig) + 1 (ver/cmd) + 1 (fam/proto) + 2 (addr len) + + // Timeout for reading PROXY protocol header + proxyProtoReadTimeout = 5 * time.Second +) + +// PROXY protocol v1 constants +const ( + proxyProtoV1Prefix = "PROXY " + proxyProtoV1MaxLineLen = 107 // Maximum line length including CRLF + proxyProtoV1TCP4 = "TCP4" + proxyProtoV1TCP6 = "TCP6" + proxyProtoV1Unknown = "UNKNOWN" +) + +var ( + // Errors + errProxyProtoInvalid = errors.New("invalid PROXY protocol header") + errProxyProtoUnsupported = errors.New("unsupported PROXY protocol feature") + errProxyProtoTimeout = errors.New("timeout reading PROXY protocol header") + errProxyProtoUnrecognized = errors.New("unrecognized 
PROXY protocol format") +) + +// proxyProtoAddr contains the address information extracted from PROXY protocol header +type proxyProtoAddr struct { + srcIP net.IP + srcPort uint16 + dstIP net.IP + dstPort uint16 +} + +// String implements net.Addr interface +func (p *proxyProtoAddr) String() string { + return net.JoinHostPort(p.srcIP.String(), fmt.Sprintf("%d", p.srcPort)) +} + +// Network implements net.Addr interface +func (p *proxyProtoAddr) Network() string { + if p.srcIP.To4() != nil { + return "tcp4" + } + return "tcp6" +} + +// proxyConn wraps a net.Conn to override RemoteAddr() with the address +// extracted from the PROXY protocol header +type proxyConn struct { + net.Conn + remoteAddr net.Addr +} + +// RemoteAddr returns the original client address extracted from PROXY protocol +func (pc *proxyConn) RemoteAddr() net.Addr { + return pc.remoteAddr +} + +// detectProxyProtoVersion reads the first bytes and determines protocol version. +// Returns 1 for v1, 2 for v2, or error. +// The first 6 bytes read are returned so they can be used by the parser. +func detectProxyProtoVersion(conn net.Conn) (version int, header []byte, err error) { + // Read first 6 bytes to check for "PROXY " or v2 signature + header = make([]byte, 6) + if _, err = io.ReadFull(conn, header); err != nil { + return 0, nil, fmt.Errorf("failed to read protocol version: %w", err) + } + switch bytesToString(header) { + case proxyProtoV1Prefix: + return 1, header, nil + case proxyProtoV2Sig[:6]: + return 2, header, nil + default: + return 0, nil, errProxyProtoUnrecognized + } +} + +// readProxyProtoV1Header parses PROXY protocol v1 text format. +// Expects the "PROXY " prefix (6 bytes) to have already been consumed. 
+func readProxyProtoV1Header(conn net.Conn) (*proxyProtoAddr, error) { + // Read rest of line (max 107 bytes total, already read 6) + maxRemaining := proxyProtoV1MaxLineLen - 6 + + // Read up to maxRemaining bytes at once (more efficient than byte-by-byte) + buf := make([]byte, maxRemaining) + var line []byte + + for len(line) < maxRemaining { + // Read available data + n, err := conn.Read(buf[len(line):]) + if err != nil { + return nil, fmt.Errorf("failed to read v1 line: %w", err) + } + + line = buf[:len(line)+n] + + // Look for CRLF in what we've read so far + for i := 0; i < len(line)-1; i++ { + if line[i] == '\r' && line[i+1] == '\n' { + // Found CRLF - extract just the line portion + line = line[:i] + goto foundCRLF + } + } + } + + // Exceeded max length without finding CRLF + return nil, fmt.Errorf("%w: v1 line too long", errProxyProtoInvalid) + +foundCRLF: + // Get parts from the protocol + parts := strings.Fields(string(line)) + + // Validate format + if len(parts) < 1 { + return nil, fmt.Errorf("%w: invalid v1 format", errProxyProtoInvalid) + } + + // Handle UNKNOWN (health check, like v2 LOCAL) + if parts[0] == proxyProtoV1Unknown { + return nil, nil + } + + // Must have exactly 5 parts: protocol, src-ip, dst-ip, src-port, dst-port + if len(parts) != 5 { + return nil, fmt.Errorf("%w: invalid v1 format", errProxyProtoInvalid) + } + + protocol := parts[0] + srcIP := net.ParseIP(parts[1]) + dstIP := net.ParseIP(parts[2]) + + if srcIP == nil || dstIP == nil { + return nil, fmt.Errorf("%w: invalid address", errProxyProtoInvalid) + } + + // Parse ports + srcPort, err := strconv.ParseUint(parts[3], 10, 16) + if err != nil { + return nil, fmt.Errorf("invalid source port: %w", err) + } + + dstPort, err := strconv.ParseUint(parts[4], 10, 16) + if err != nil { + return nil, fmt.Errorf("invalid dest port: %w", err) + } + + // Validate protocol matches IP version + if protocol == proxyProtoV1TCP4 && srcIP.To4() == nil { + return nil, fmt.Errorf("%w: TCP4 with IPv6 
address", errProxyProtoInvalid) + } + if protocol == proxyProtoV1TCP6 && srcIP.To4() != nil { + return nil, fmt.Errorf("%w: TCP6 with IPv4 address", errProxyProtoInvalid) + } + if protocol != proxyProtoV1TCP4 && protocol != proxyProtoV1TCP6 { + return nil, fmt.Errorf("%w: invalid protocol %s", errProxyProtoInvalid, protocol) + } + + return &proxyProtoAddr{ + srcIP: srcIP, + srcPort: uint16(srcPort), + dstIP: dstIP, + dstPort: uint16(dstPort), + }, nil +} + +// readProxyProtoHeader reads and parses PROXY protocol (v1 or v2) from the connection. +// Automatically detects version and routes to appropriate parser. +// If the command is LOCAL/UNKNOWN (health check), it returns nil for addr and no error. +// If the command is PROXY, it returns the parsed address information. +// The connection must be fresh (no data read yet). +func readProxyProtoHeader(conn net.Conn) (*proxyProtoAddr, error) { + // Set read deadline to prevent hanging on slow/malicious clients + if err := conn.SetReadDeadline(time.Now().Add(proxyProtoReadTimeout)); err != nil { + return nil, err + } + defer conn.SetReadDeadline(time.Time{}) + + // Detect version + version, firstBytes, err := detectProxyProtoVersion(conn) + if err != nil { + return nil, err + } + + switch version { + case 1: + // v1 parser expects "PROXY " prefix already consumed + return readProxyProtoV1Header(conn) + case 2: + // Read rest of v2 signature (bytes 6-11, total 6 more bytes) + remaining := make([]byte, 6) + if _, err := io.ReadFull(conn, remaining); err != nil { + return nil, fmt.Errorf("failed to read v2 signature: %w", err) + } + + // Verify full signature + fullSig := string(firstBytes) + string(remaining) + if fullSig != proxyProtoV2Sig { + return nil, fmt.Errorf("%w: invalid signature", errProxyProtoInvalid) + } + + // Read rest of header: ver/cmd, fam/proto, addr-len (4 bytes) + header := make([]byte, 4) + if _, err := io.ReadFull(conn, header); err != nil { + return nil, fmt.Errorf("failed to read v2 header: %w", 
err) + } + + // Continue with parsing + return parseProxyProtoV2Header(conn, header) + default: + return nil, fmt.Errorf("unsupported PROXY protocol version: %d", version) + } +} + +// readProxyProtoV2Header is kept for backward compatibility and direct testing. +// It reads and parses a PROXY protocol v2 header from the connection. +// If the command is LOCAL (health check), it returns nil for addr and no error. +// If the command is PROXY, it returns the parsed address information. +// The connection must be fresh (no data read yet). +func readProxyProtoV2Header(conn net.Conn) (*proxyProtoAddr, error) { + // Set read deadline to prevent hanging on slow/malicious clients + if err := conn.SetReadDeadline(time.Now().Add(proxyProtoReadTimeout)); err != nil { + return nil, err + } + defer conn.SetReadDeadline(time.Time{}) + + // Read fixed header (16 bytes) + header := make([]byte, proxyProtoV2HeaderSize) + if _, err := io.ReadFull(conn, header); err != nil { + if ne, ok := err.(net.Error); ok && ne.Timeout() { + return nil, errProxyProtoTimeout + } + return nil, fmt.Errorf("failed to read PROXY protocol header: %w", err) + } + + // Validate signature (first 12 bytes) + if string(header[:12]) != proxyProtoV2Sig { + return nil, fmt.Errorf("%w: invalid signature", errProxyProtoInvalid) + } + + // Continue with parsing after signature + return parseProxyProtoV2Header(conn, header[12:16]) +} + +// parseProxyProtoV2Header parses v2 protocol after signature has been validated. +// header contains the 4 bytes: ver/cmd, fam/proto, addr-len (2 bytes). 
+func parseProxyProtoV2Header(conn net.Conn, header []byte) (*proxyProtoAddr, error) { + // Parse version and command + verCmd := header[0] + version := verCmd & proxyProtoV2VerMask + command := verCmd & proxyProtoCmdMask + + if version != proxyProtoV2Ver { + return nil, fmt.Errorf("%w: invalid version 0x%02x", errProxyProtoInvalid, version) + } + + // Parse address family and protocol + famProto := header[1] + family := famProto & proxyProtoFamilyMask + protocol := famProto & proxyProtoProtoMask + + // Parse address length (big-endian uint16) + addrLen := binary.BigEndian.Uint16(header[2:4]) + + // Handle LOCAL command (health check) + if command == proxyProtoCmdLocal { + // For LOCAL, we should skip the address data if any + if addrLen > 0 { + // Discard the address data + if _, err := io.CopyN(io.Discard, conn, int64(addrLen)); err != nil { + return nil, fmt.Errorf("failed to discard LOCAL command address data: %w", err) + } + } + return nil, nil // nil addr indicates LOCAL command + } + + // Handle PROXY command + if command != proxyProtoCmdProxy { + return nil, fmt.Errorf("unknown PROXY protocol command: 0x%02x", command) + } + + // Validate protocol (we only support STREAM/TCP) + if protocol != proxyProtoProtoStream { + return nil, fmt.Errorf("%w: only STREAM protocol supported", errProxyProtoUnsupported) + } + + // Parse address data based on family + var addr *proxyProtoAddr + var err error + switch family { + case proxyProtoFamilyInet: + addr, err = parseIPv4Addr(conn, addrLen) + case proxyProtoFamilyInet6: + addr, err = parseIPv6Addr(conn, addrLen) + case proxyProtoFamilyUnspec: + // UNSPEC family with PROXY command is valid but rare + // Just skip the address data + if addrLen > 0 { + if _, err := io.CopyN(io.Discard, conn, int64(addrLen)); err != nil { + return nil, fmt.Errorf("failed to discard UNSPEC address address data: %w", err) + } + } + return nil, nil + default: + return nil, fmt.Errorf("%w: unsupported address family 0x%02x", 
errProxyProtoUnsupported, family) + } + return addr, err +} + +// parseIPv4Addr parses IPv4 address data from PROXY protocol header +func parseIPv4Addr(conn net.Conn, addrLen uint16) (*proxyProtoAddr, error) { + // IPv4: 4 (src IP) + 4 (dst IP) + 2 (src port) + 2 (dst port) = 12 bytes minimum + if addrLen < proxyProtoAddrSizeIPv4 { + return nil, fmt.Errorf("IPv4 address data too short: %d bytes", addrLen) + } + addrData := make([]byte, addrLen) + if _, err := io.ReadFull(conn, addrData); err != nil { + return nil, fmt.Errorf("failed to read IPv4 address data: %w", err) + } + return &proxyProtoAddr{ + srcIP: net.IP(addrData[0:4]), + dstIP: net.IP(addrData[4:8]), + srcPort: binary.BigEndian.Uint16(addrData[8:10]), + dstPort: binary.BigEndian.Uint16(addrData[10:12]), + }, nil +} + +// parseIPv6Addr parses IPv6 address data from PROXY protocol header +func parseIPv6Addr(conn net.Conn, addrLen uint16) (*proxyProtoAddr, error) { + // IPv6: 16 (src IP) + 16 (dst IP) + 2 (src port) + 2 (dst port) = 36 bytes minimum + if addrLen < proxyProtoAddrSizeIPv6 { + return nil, fmt.Errorf("IPv6 address data too short: %d bytes", addrLen) + } + addrData := make([]byte, addrLen) + if _, err := io.ReadFull(conn, addrData); err != nil { + return nil, fmt.Errorf("failed to read IPv6 address data: %w", err) + } + return &proxyProtoAddr{ + srcIP: net.IP(addrData[0:16]), + dstIP: net.IP(addrData[16:32]), + srcPort: binary.BigEndian.Uint16(addrData[32:34]), + dstPort: binary.BigEndian.Uint16(addrData[34:36]), + }, nil +} diff --git a/vendor/github.com/nats-io/nats-server/v2/server/const.go b/vendor/github.com/nats-io/nats-server/v2/server/const.go index dcb9c65ccff..a4a72cc988a 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/const.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/const.go @@ -66,7 +66,7 @@ func init() { const ( // VERSION is the current version for the server. - VERSION = "2.12.1" + VERSION = "2.12.3" // PROTO is the currently supported protocol. 
// 0 was the original diff --git a/vendor/github.com/nats-io/nats-server/v2/server/consumer.go b/vendor/github.com/nats-io/nats-server/v2/server/consumer.go index 080ec7a1fdc..b1da9029039 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/consumer.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/consumer.go @@ -963,7 +963,7 @@ func (mset *stream) addConsumerWithAssignment(config *ConsumerConfig, oname stri } mset.mu.RLock() - s, jsa, cfg, acc := mset.srv, mset.jsa, mset.cfg, mset.acc + s, js, jsa, cfg, acc := mset.srv, mset.js, mset.jsa, mset.cfg, mset.acc mset.mu.RUnlock() // If we do not have the consumer currently assigned to us in cluster mode we will proceed but warn. @@ -1134,6 +1134,13 @@ func (mset *stream) addConsumerWithAssignment(config *ConsumerConfig, oname stri created: time.Now().UTC(), } + // Add created timestamp used for the store, must match that of the consumer assignment if it exists. + if ca != nil { + js.mu.RLock() + o.created = ca.Created + js.mu.RUnlock() + } + // Bind internal client to the user account. o.client.registerWithAccount(a) // Bind to the system account. @@ -1186,7 +1193,7 @@ func (mset *stream) addConsumerWithAssignment(config *ConsumerConfig, oname stri // Setup our storage if not a direct consumer. 
if !config.Direct { - store, err := mset.store.ConsumerStore(o.name, config) + store, err := mset.store.ConsumerStore(o.name, o.created, config) if err != nil { mset.mu.Unlock() o.deleteWithoutAdvisory() @@ -3133,6 +3140,12 @@ func (o *consumer) infoWithSnapAndReply(snap bool, reply string) *ConsumerInfo { }) } + np, err := o.checkNumPending() + if err != nil { + o.mu.Unlock() + return nil + } + cfg := o.cfg info := &ConsumerInfo{ Stream: o.stream, @@ -3149,7 +3162,7 @@ func (o *consumer) infoWithSnapAndReply(snap bool, reply string) *ConsumerInfo { }, NumAckPending: len(o.pending), NumRedelivered: len(o.rdc), - NumPending: o.checkNumPending(), + NumPending: np, PushBound: o.isPushMode() && o.active, TimeStamp: time.Now().UTC(), PriorityGroups: priorityGroups, @@ -4519,7 +4532,8 @@ func (o *consumer) processWaiting(eos bool) (int, int, int, time.Time) { var pre *waitingRequest for wr := wq.head; wr != nil; { // Check expiration. - if (eos && wr.noWait && wr.d > 0) || (!wr.expires.IsZero() && now.After(wr.expires)) { + expires := !wr.expires.IsZero() && now.After(wr.expires) + if (eos && wr.noWait) || expires { rdWait := o.replicateDeliveries() if rdWait { // Check if we need to send the timeout after pending replicated deliveries, or can do so immediately. @@ -4528,13 +4542,26 @@ func (o *consumer) processWaiting(eos bool) (int, int, int, time.Time) { } else { wd.pn, wd.pb = wr.n, wr.b } + // If we still need to wait for replicated deliveries, remove from waiting list. + if rdWait { + wr = remove(pre, wr) + continue + } } - if !rdWait { + // Normally it's a timeout. 
+ if expires { hdr := fmt.Appendf(nil, "NATS/1.0 408 Request Timeout\r\n%s: %d\r\n%s: %d\r\n\r\n", JSPullRequestPendingMsgs, wr.n, JSPullRequestPendingBytes, wr.b) o.outq.send(newJSPubMsg(wr.reply, _EMPTY_, _EMPTY_, hdr, nil, nil, 0)) + wr = remove(pre, wr) + continue + } else if wr.expires.IsZero() || wr.d > 0 { + // But if we're NoWait without expiry, we've reached the end of the stream, and we've not delivered any messages. + // Return no messages instead, which is the same as if we'd rejected the pull request initially. + hdr := fmt.Appendf(nil, "NATS/1.0 404 No Messages\r\n\r\n") + o.outq.send(newJSPubMsg(wr.reply, _EMPTY_, _EMPTY_, hdr, nil, nil, 0)) + wr = remove(pre, wr) + continue } - wr = remove(pre, wr) - continue } // Now check interest. interest := wr.acc.sl.HasInterest(wr.interest) @@ -5083,17 +5110,17 @@ func (o *consumer) setMaxPendingBytes(limit int) { // The race is a getNextMsg skips a deleted msg, and then the decStreamPending call fires. // This does some quick sanity checks to see if we should re-calculate num pending. // Lock should be held. -func (o *consumer) checkNumPending() uint64 { +func (o *consumer) checkNumPending() (uint64, error) { if o.mset != nil && o.mset.store != nil { var state StreamState o.mset.store.FastState(&state) npc := o.numPending() if o.sseq > state.LastSeq && npc > 0 || npc > state.Msgs { // Re-calculate. - o.streamNumPending() + return o.streamNumPending() } } - return o.numPending() + return o.numPending(), nil } // Lock should be held. @@ -5120,7 +5147,7 @@ func (o *consumer) checkNumPendingOnEOF() { } // Call into streamNumPending after acquiring the consumer lock. -func (o *consumer) streamNumPendingLocked() uint64 { +func (o *consumer) streamNumPendingLocked() (uint64, error) { o.mu.Lock() defer o.mu.Unlock() return o.streamNumPending() @@ -5129,22 +5156,25 @@ func (o *consumer) streamNumPendingLocked() uint64 { // Will force a set from the stream store of num pending. 
// Depends on delivery policy, for last per subject we calculate differently. // Lock should be held. -func (o *consumer) streamNumPending() uint64 { +func (o *consumer) streamNumPending() (uint64, error) { if o.mset == nil || o.mset.store == nil { o.npc, o.npf = 0, 0 - return 0 + return 0, nil + } + npc, npf, err := o.calculateNumPending() + if err != nil { + return 0, err } - npc, npf := o.calculateNumPending() o.npc, o.npf = int64(npc), npf - return o.numPending() + return o.numPending(), nil } // Will calculate num pending but only requires a read lock. // Depends on delivery policy, for last per subject we calculate differently. // At least RLock should be held. -func (o *consumer) calculateNumPending() (npc, npf uint64) { +func (o *consumer) calculateNumPending() (npc, npf uint64, err error) { if o.mset == nil || o.mset.store == nil { - return 0, 0 + return 0, 0, nil } isLastPerSubject := o.cfg.DeliverPolicy == DeliverLastPerSubject @@ -5860,7 +5890,8 @@ func (o *consumer) hasNoLocalInterest() bool { // This is when the underlying stream has been purged. // sseq is the new first seq for the stream after purge. -// Lock should NOT be held. +// Consumer lock should NOT be held but the parent stream +// lock MUST be held. func (o *consumer) purge(sseq uint64, slseq uint64, isWider bool) { // Do not update our state unless we know we are the leader. 
if !o.isLeader() { @@ -5941,7 +5972,7 @@ func (o *consumer) purge(sseq uint64, slseq uint64, isWider bool) { o.mu.Unlock() if err := o.writeStoreState(); err != nil && s != nil && mset != nil { - s.Warnf("Consumer '%s > %s > %s' error on write store state from purge: %v", acc, mset.name(), name, err) + s.Warnf("Consumer '%s > %s > %s' error on write store state from purge: %v", acc, mset.nameLocked(false), name, err) } } @@ -6430,6 +6461,10 @@ func (o *consumer) checkStateForInterestStream(ss *StreamState) error { if asflr&(1<<63) != 0 { return errAckFloorInvalid } + dflr := asflr + if len(state.Pending) > 0 && state.Delivered.Stream > dflr { + dflr = state.Delivered.Stream + } // Check if the underlying stream's last sequence is less than our floor. // This can happen if the stream has been reset and has not caught up yet. @@ -6448,7 +6483,7 @@ func (o *consumer) checkStateForInterestStream(ss *StreamState) error { } var retryAsflr uint64 - for seq = fseq; asflr > 0 && seq <= asflr; seq++ { + for seq = fseq; dflr > 0 && seq <= dflr; seq++ { if filters != nil { _, nseq, err = store.LoadNextMsgMulti(filters, seq, &smv) } else { @@ -6458,14 +6493,24 @@ func (o *consumer) checkStateForInterestStream(ss *StreamState) error { if nseq > seq { seq = nseq } - // Only ack though if no error and seq <= ack floor. - if err == nil && seq <= asflr { - didRemove := mset.ackMsg(o, seq) - // Removing the message could fail. For example if clustered since we need to propose it. - // Overwrite retry floor (only the first time) to allow us to check next time if the removal was successful. - if didRemove && retryAsflr == 0 { - retryAsflr = seq + if err == nil { + // Only ack though if no error and seq <= ack floor. + if seq <= asflr { + didRemove := mset.ackMsg(o, seq) + // Removing the message could fail. For example if clustered since we need to propose it. + // Overwrite retry floor (only the first time) to allow us to check next time if the removal was successful. 
+ if didRemove && retryAsflr == 0 { + retryAsflr = seq + } + } else if seq <= dflr { + // If we have pending, we will need to walk through to delivered in case we missed any of those acks as well. + if _, ok := state.Pending[seq]; !ok { + // The filters are already taken into account, + mset.ackMsg(o, seq) + } } + } else if err == ErrStoreEOF { + break } } // If retry floor was not overwritten, set to ack floor+1, we don't need to account for any retries below it. @@ -6479,21 +6524,7 @@ func (o *consumer) checkStateForInterestStream(ss *StreamState) error { if retryAsflr > o.chkflr { o.chkflr = retryAsflr } - // See if we need to process this update if our parent stream is not a limits policy stream. - state, _ = o.store.State() o.mu.Unlock() - - // If we have pending, we will need to walk through to delivered in case we missed any of those acks as well. - if state != nil && len(state.Pending) > 0 && state.AckFloor.Stream > 0 { - for seq := state.AckFloor.Stream + 1; seq <= state.Delivered.Stream; seq++ { - if _, ok := state.Pending[seq]; !ok { - // Want to call needAck since it is filter aware. 
- if o.needAck(seq, _EMPTY_) { - mset.ackMsg(o, seq) - } - } - } - } return nil } diff --git a/vendor/github.com/nats-io/nats-server/v2/server/errors.json b/vendor/github.com/nats-io/nats-server/v2/server/errors.json index 410544bdaa2..97c21c7eef3 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/errors.json +++ b/vendor/github.com/nats-io/nats-server/v2/server/errors.json @@ -1998,5 +1998,15 @@ "help": "", "url": "", "deprecates": "" + }, + { + "constant": "JSClusterServerMemberChangeInflightErr", + "code": 400, + "error_code": 10202, + "description": "cluster member change is in progress", + "comment": "", + "help": "", + "url": "", + "deprecates": "" } ] diff --git a/vendor/github.com/nats-io/nats-server/v2/server/events.go b/vendor/github.com/nats-io/nats-server/v2/server/events.go index ff2ee46367e..8bcb3a713fa 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/events.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/events.go @@ -1735,18 +1735,18 @@ func (s *Server) remoteServerUpdate(sub *subscription, c *client, _ *Account, su node := getHash(si.Name) accountNRG := si.AccountNRG() oldInfo, _ := s.nodeToInfo.Swap(node, nodeInfo{ - si.Name, - si.Version, - si.Cluster, - si.Domain, - si.ID, - si.Tags, - cfg, - stats, - false, - si.JetStreamEnabled(), - si.BinaryStreamSnapshot(), - accountNRG, + name: si.Name, + version: si.Version, + cluster: si.Cluster, + domain: si.Domain, + id: si.ID, + tags: si.Tags, + cfg: cfg, + stats: stats, + offline: false, + js: si.JetStreamEnabled(), + binarySnapshots: si.BinaryStreamSnapshot(), + accountNRG: accountNRG, }) if oldInfo == nil || accountNRG != oldInfo.(nodeInfo).accountNRG { // One of the servers we received statsz from changed its mind about @@ -1789,18 +1789,18 @@ func (s *Server) processNewServer(si *ServerInfo) { // Only update if non-existent if _, ok := s.nodeToInfo.Load(node); !ok { s.nodeToInfo.Store(node, nodeInfo{ - si.Name, - si.Version, - si.Cluster, - si.Domain, - si.ID, - 
si.Tags, - nil, - nil, - false, - si.JetStreamEnabled(), - si.BinaryStreamSnapshot(), - si.AccountNRG(), + name: si.Name, + version: si.Version, + cluster: si.Cluster, + domain: si.Domain, + id: si.ID, + tags: si.Tags, + cfg: nil, + stats: nil, + offline: false, + js: si.JetStreamEnabled(), + binarySnapshots: si.BinaryStreamSnapshot(), + accountNRG: si.AccountNRG(), }) } } diff --git a/vendor/github.com/nats-io/nats-server/v2/server/filestore.go b/vendor/github.com/nats-io/nats-server/v2/server/filestore.go index 2ac494f853a..1f34ff958cd 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/filestore.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/filestore.go @@ -25,7 +25,6 @@ import ( "encoding/json" "errors" "fmt" - "hash" "io" "io/fs" "math" @@ -194,7 +193,7 @@ type fileStore struct { psim *stree.SubjectTree[psi] tsl int adml int - hh hash.Hash64 + hh *highwayhash.Digest64 qch chan struct{} fsld chan struct{} cmu sync.RWMutex @@ -239,7 +238,7 @@ type msgBlock struct { lrts int64 lsts int64 llseq uint64 - hh hash.Hash64 + hh *highwayhash.Digest64 ecache elastic.Pointer[cache] cache *cache cloads uint64 @@ -468,7 +467,7 @@ func newFileStoreWithCreated(fcfg FileStoreConfig, cfg StreamConfig, created tim // Create highway hash for message blocks. Use sha256 of directory as key. 
key := sha256.Sum256([]byte(cfg.Name)) - fs.hh, err = highwayhash.New64(key[:]) + fs.hh, err = highwayhash.NewDigest64(key[:]) if err != nil { return nil, fmt.Errorf("could not create hash: %v", err) } @@ -939,7 +938,8 @@ func (fs *fileStore) writeStreamMeta() error { } fs.hh.Reset() fs.hh.Write(b) - checksum := hex.EncodeToString(fs.hh.Sum(nil)) + var hb [highwayhash.Size64]byte + checksum := hex.EncodeToString(fs.hh.Sum(hb[:0])) sum := filepath.Join(fs.fcfg.StoreDir, JetStreamMetaFileSum) err = fs.writeFileWithOptionalSync(sum, []byte(checksum), defaultFilePerms) if err != nil { @@ -1040,7 +1040,7 @@ func (fs *fileStore) initMsgBlock(index uint32) *msgBlock { if mb.hh == nil { key := sha256.Sum256(fs.hashKeyForBlock(index)) - mb.hh, _ = highwayhash.New64(key[:]) + mb.hh, _ = highwayhash.NewDigest64(key[:]) } return mb } @@ -1353,6 +1353,8 @@ func (mb *msgBlock) convertCipher() error { // Check for compression, and make sure we can parse with old cipher and key file. if nbuf, err := mb.decompressIfNeeded(buf); err != nil { return err + } else if _, _, err = mb.rebuildStateFromBufLocked(nbuf, false); err != nil { + return err } else if err = mb.indexCacheBuf(nbuf); err != nil { return err } @@ -1391,8 +1393,9 @@ func (mb *msgBlock) convertToEncrypted() error { // Check for compression. if buf, err = mb.decompressIfNeeded(buf); err != nil { return err - } - if err := mb.indexCacheBuf(buf); err != nil { + } else if _, _, err = mb.rebuildStateFromBufLocked(buf, false); err != nil { + return err + } else if err = mb.indexCacheBuf(buf); err != nil { // This likely indicates this was already encrypted or corrupt. mb.cache = nil return err @@ -1427,8 +1430,6 @@ func (mb *msgBlock) rebuildState() (*LostStreamData, []uint64, error) { // Rebuild the state of the blk based on what we have on disk in the N.blk file. // Lock should be held. 
func (mb *msgBlock) rebuildStateLocked() (*LostStreamData, []uint64, error) { - startLastSeq := atomic.LoadUint64(&mb.last.seq) - // Remove the .fss file and clear any cache we have set. mb.clearCacheAndOffset() @@ -1455,12 +1456,6 @@ func (mb *msgBlock) rebuildStateLocked() (*LostStreamData, []uint64, error) { return ld, nil, err } - // Clear state we need to rebuild. - mb.msgs, mb.bytes, mb.rbytes, mb.fss = 0, 0, 0, nil - atomic.StoreUint64(&mb.last.seq, 0) - mb.last.ts = 0 - firstNeedsSet := true - // Check if we need to decrypt. if err = mb.encryptOrDecryptIfNeeded(buf); err != nil { return nil, nil, err @@ -1469,6 +1464,19 @@ func (mb *msgBlock) rebuildStateLocked() (*LostStreamData, []uint64, error) { if buf, err = mb.decompressIfNeeded(buf); err != nil { return nil, nil, err } + return mb.rebuildStateFromBufLocked(buf, true) +} + +// Lock should be held. +func (mb *msgBlock) rebuildStateFromBufLocked(buf []byte, allowTruncate bool) (*LostStreamData, []uint64, error) { + var err error + startLastSeq := atomic.LoadUint64(&mb.last.seq) + + // Clear state we need to rebuild. + mb.msgs, mb.bytes, mb.rbytes, mb.fss = 0, 0, 0, nil + atomic.StoreUint64(&mb.last.seq, 0) + mb.last.ts = 0 + firstNeedsSet := true mb.rbytes = uint64(len(buf)) @@ -1482,6 +1490,12 @@ func (mb *msgBlock) rebuildStateLocked() (*LostStreamData, []uint64, error) { var le = binary.LittleEndian truncate := func(index uint32) { + // There are cases where we're not allowed to truncate, like for an encrypted or compressed + // block since the index will be the decrypted and decompressed index. + if !allowTruncate { + return + } + var fd *os.File if mb.mfd != nil { fd = mb.mfd @@ -2705,7 +2719,7 @@ func (mb *msgBlock) firstMatchingMulti(sl *gsl.SimpleSublist, start uint64, sm * if err != nil { continue } - expireOk := seq == lseq && mb.llseq == seq + expireOk := seq == lseq && mb.llseq != llseq && mb.llseq == seq updateLLTS = false // cacheLookup already updated it. 
if sl.HasInterest(fsm.subj) { return fsm, expireOk, nil @@ -2736,7 +2750,9 @@ func (mb *msgBlock) firstMatching(filter string, wc bool, start uint64, sm *Stor var didLoad bool if mb.fssNotLoaded() { // Make sure we have fss loaded. - mb.loadMsgsWithLock() + if err := mb.loadMsgsWithLock(); err != nil { + return nil, false, err + } didLoad = true } // Mark fss activity. @@ -2839,7 +2855,7 @@ func (mb *msgBlock) firstMatching(filter string, wc bool, start uint64, sm *Stor continue } updateLLTS = false // cacheLookup already updated it. - expireOk := seq == lseq && mb.llseq == seq + expireOk := seq == lseq && mb.llseq != llseq && mb.llseq == seq if isAll { return fsm, expireOk, nil } @@ -2855,6 +2871,120 @@ func (mb *msgBlock) firstMatching(filter string, wc bool, start uint64, sm *Stor return nil, didLoad, ErrStoreMsgNotFound } +// Find the previous matching message against a sublist, working BACKWARDS from start. +func (mb *msgBlock) prevMatchingMulti(sl *gsl.SimpleSublist, start uint64, sm *StoreMsg) (*StoreMsg, bool, error) { + mb.mu.Lock() + var didLoad bool + var updateLLTS bool + defer func() { + if updateLLTS { + mb.llts = ats.AccessTime() + } + mb.finishedWithCache() + mb.mu.Unlock() + }() + + // Need messages loaded from here on out. + if mb.cacheNotLoaded() { + if err := mb.loadMsgsWithLock(); err != nil { + return nil, false, err + } + didLoad = true + } + + // Make sure to start at mb.last.seq if lseq < mb.last.seq + if seq := atomic.LoadUint64(&mb.last.seq); start > seq { + start = seq + } + lseq := atomic.LoadUint64(&mb.first.seq) + + if sm == nil { + sm = new(StoreMsg) + } + + // If the FSS state has fewer entries than sequences in the linear scan, + // then use intersection instead as likely going to be cheaper. This will + // often be the case with high numbers of deletes, as well as a smaller + // number of subjects in the block. + if uint64(mb.fss.Size()) < start-lseq { + // If there are no subject matches then this is effectively no-op. 
+ hseq := uint64(0) + gsl.IntersectStree(mb.fss, sl, func(subj []byte, ss *SimpleState) { + if ss.firstNeedsUpdate || ss.lastNeedsUpdate { + // mb is already loaded into the cache so should be fast-ish. + mb.recalculateForSubj(bytesToString(subj), ss) + } + first := min(start, ss.Last) + // Skip if cutoff is before this subject's first, or if we already + // have a higher-or-equal candidate (hseq holds the highest found). + if first < ss.First || first <= hseq { + // The start cutoff is before the first sequence for this subject, + // or we already know of a subject with a later-or-equal msg. + return + } + if first == ss.Last { + // If the start floor is above where this subject starts then we can + // short-circuit, avoiding needing to scan for the next message. + if fsm, err := mb.cacheLookup(ss.Last, sm); err == nil { + sm = fsm + hseq = ss.Last + } + return + } + for seq := first; seq >= ss.First; seq-- { + // Otherwise we have a start floor that intersects where this subject + // has messages in the block, so we need to walk up until we find a + // message matching the subject. + if mb.dmap.Exists(seq) { + // Optimisation to avoid calling cacheLookup which hits time.Now(). + // Instead we will update it only once in a defer. + updateLLTS = true + continue + } + llseq := mb.llseq + fsm, err := mb.cacheLookup(seq, sm) + if err != nil { + continue + } + updateLLTS = false // cacheLookup already updated it. + if sl.HasInterest(fsm.subj) { + hseq = seq + sm = fsm + break + } + // If we are here we did not match, so put the llseq back. + mb.llseq = llseq + } + }) + if hseq > 0 && sm != nil { + return sm, didLoad && start == lseq, nil + } + } else { + for seq := start; seq >= lseq; seq-- { + if mb.dmap.Exists(seq) { + // Optimisation to avoid calling cacheLookup which hits time.Now(). + // Instead we will update it only once in a defer. 
+ updateLLTS = true + continue + } + llseq := mb.llseq + fsm, err := mb.cacheLookup(seq, sm) + if err != nil { + continue + } + expireOk := seq == lseq && mb.llseq != llseq && mb.llseq == seq + updateLLTS = false // cacheLookup already updated it. + if sl.HasInterest(fsm.subj) { + return fsm, expireOk, nil + } + // If we are here we did not match, so put the llseq back. + mb.llseq = llseq + } + } + + return nil, didLoad, ErrStoreMsgNotFound +} + // This will traverse a message block and generate the filtered pending. func (mb *msgBlock) filteredPending(subj string, wc bool, seq uint64) (total, first, last uint64) { mb.mu.Lock() @@ -3226,7 +3356,10 @@ func (fs *fileStore) SubjectsState(subject string) map[string]SimpleState { var shouldExpire bool if mb.fssNotLoaded() { // Make sure we have fss loaded. - mb.loadMsgsWithLock() + if err := mb.loadMsgsWithLock(); err != nil { + mb.mu.Unlock() + return nil + } shouldExpire = true } // Mark fss activity. @@ -3290,7 +3423,10 @@ func (fs *fileStore) allLastSeqsLocked() ([]uint64, error) { var shouldExpire bool if mb.fssNotLoaded() { // Make sure we have fss loaded. - mb.loadMsgsWithLock() + if err := mb.loadMsgsWithLock(); err != nil { + mb.mu.Unlock() + return nil, err + } shouldExpire = true } @@ -3406,6 +3542,7 @@ func (fs *fileStore) MultiLastSeqs(filters []string, maxSeq uint64, maxAllowed i // Iterate the fss and check against our subs. We will delete from subs as we add. // Once len(subs) == 0 we are done. + var ierr error mb.fss.IterFast(func(bsubj []byte, ss *SimpleState) bool { // Already been processed and accounted for was not matched in the first place. if subs[string(bsubj)] == nil { @@ -3424,7 +3561,9 @@ func (fs *fileStore) MultiLastSeqs(filters []string, maxSeq uint64, maxAllowed i // Need to search for the real last since recorded last is > maxSeq. 
var didLoad bool if mb.cacheNotLoaded() { - mb.loadMsgsWithLock() + if ierr = mb.loadMsgsWithLock(); ierr != nil { + return false + } didLoad = true } var smv StoreMsg @@ -3447,6 +3586,9 @@ func (fs *fileStore) MultiLastSeqs(filters []string, maxSeq uint64, maxAllowed i return true }) mb.mu.Unlock() + if ierr != nil { + return nil, ierr + } // If maxAllowed was sepcified check that we will not exceed that. if maxAllowed > 0 && len(seqs) > maxAllowed { @@ -3462,7 +3604,7 @@ func (fs *fileStore) MultiLastSeqs(filters []string, maxSeq uint64, maxAllowed i // NumPending will return the number of pending messages matching the filter subject starting at sequence. // Optimized for stream num pending calculations for consumers. -func (fs *fileStore) NumPending(sseq uint64, filter string, lastPerSubject bool) (total, validThrough uint64) { +func (fs *fileStore) NumPending(sseq uint64, filter string, lastPerSubject bool) (total, validThrough uint64, err error) { fs.mu.RLock() defer fs.mu.RUnlock() @@ -3470,7 +3612,7 @@ func (fs *fileStore) NumPending(sseq uint64, filter string, lastPerSubject bool) validThrough = fs.state.LastSeq if fs.state.Msgs == 0 || sseq > fs.state.LastSeq { - return 0, validThrough + return 0, validThrough, nil } // If sseq is less then our first set to first. @@ -3500,9 +3642,9 @@ func (fs *fileStore) NumPending(sseq uint64, filter string, lastPerSubject bool) // If we are isAll and have no deleted we can do a simpler calculation. 
if !lastPerSubject && isAll && (fs.state.LastSeq-fs.state.FirstSeq+1) == fs.state.Msgs { if sseq == 0 { - return fs.state.Msgs, validThrough + return fs.state.Msgs, validThrough, nil } - return fs.state.LastSeq - sseq + 1, validThrough + return fs.state.LastSeq - sseq + 1, validThrough, nil } _tsa, _fsa := [32]string{}, [32]string{} @@ -3530,7 +3672,7 @@ func (fs *fileStore) NumPending(sseq uint64, filter string, lastPerSubject bool) if lastPerSubject { // If we want all and our start sequence is equal or less than first return number of subjects. if isAll && sseq <= fs.state.FirstSeq { - return uint64(fs.psim.Size()), validThrough + return uint64(fs.psim.Size()), validThrough, nil } // If we are here we need to scan. We are going to scan the PSIM looking for lblks that are >= seqStart. // This will build up a list of all subjects from the selected block onward. @@ -3563,7 +3705,10 @@ func (fs *fileStore) NumPending(sseq uint64, filter string, lastPerSubject bool) // We need to discount the total by subjects seen before sseq, but also add them right back in if they are >= sseq for this blk. // This only should be subjects we know have the last blk in this block. if mb.cacheNotLoaded() { - mb.loadMsgsWithLock() + if err = mb.loadMsgsWithLock(); err != nil { + mb.mu.Unlock() + return 0, 0, err + } shouldExpire = true } var smv StoreMsg @@ -3604,7 +3749,7 @@ func (fs *fileStore) NumPending(sseq uint64, filter string, lastPerSubject bool) mb.llts = ats.AccessTime() } mb.mu.Unlock() - return total, validThrough + return total, validThrough, nil } // If we would need to scan more from the beginning, revert back to calculating directly here. @@ -3623,7 +3768,10 @@ func (fs *fileStore) NumPending(sseq uint64, filter string, lastPerSubject bool) // If we are here we need to at least scan the subject fss. // Make sure we have fss loaded. 
if mb.fssNotLoaded() { - mb.loadMsgsWithLock() + if err = mb.loadMsgsWithLock(); err != nil { + mb.mu.Unlock() + return 0, 0, err + } shouldExpire = true } // Mark fss activity. @@ -3652,7 +3800,10 @@ func (fs *fileStore) NumPending(sseq uint64, filter string, lastPerSubject bool) if havePartial { // Make sure we have the cache loaded. if mb.cacheNotLoaded() { - mb.loadMsgsWithLock() + if err = mb.loadMsgsWithLock(); err != nil { + mb.mu.Unlock() + return 0, 0, err + } shouldExpire = true } // Clear on partial. @@ -3677,7 +3828,7 @@ func (fs *fileStore) NumPending(sseq uint64, filter string, lastPerSubject bool) mb.mu.Unlock() total += t } - return total, validThrough + return total, validThrough, nil } // If we are here it's better to calculate totals from psim and adjust downward by scanning less blocks. @@ -3692,7 +3843,7 @@ func (fs *fileStore) NumPending(sseq uint64, filter string, lastPerSubject bool) }) // See if we were asked for all, if so we are done. if sseq <= fs.state.FirstSeq { - return total, validThrough + return total, validThrough, nil } // If we are here we need to calculate partials for the first blocks. @@ -3725,7 +3876,10 @@ func (fs *fileStore) NumPending(sseq uint64, filter string, lastPerSubject bool) // We need to adjust for all matches in this block. // Make sure we have fss loaded. This loads whole block now. if mb.fssNotLoaded() { - mb.loadMsgsWithLock() + if err = mb.loadMsgsWithLock(); err != nil { + mb.mu.Unlock() + return 0, 0, err + } shouldExpire = true } // Mark fss activity. @@ -3738,7 +3892,10 @@ func (fs *fileStore) NumPending(sseq uint64, filter string, lastPerSubject bool) } else { // This is the last block. We need to scan per message here. 
if mb.cacheNotLoaded() { - mb.loadMsgsWithLock() + if err = mb.loadMsgsWithLock(); err != nil { + mb.mu.Unlock() + return 0, 0, err + } shouldExpire = true } var last = atomic.LoadUint64(&mb.last.seq) @@ -3778,13 +3935,13 @@ func (fs *fileStore) NumPending(sseq uint64, filter string, lastPerSubject bool) // Make final adjustment. total -= adjust - return total, validThrough + return total, validThrough, nil } // NumPending will return the number of pending messages matching any subject in the sublist starting at sequence. // Optimized for stream num pending calculations for consumers with lots of filtered subjects. // Subjects should not overlap, this property is held when doing multi-filtered consumers. -func (fs *fileStore) NumPendingMulti(sseq uint64, sl *gsl.SimpleSublist, lastPerSubject bool) (total, validThrough uint64) { +func (fs *fileStore) NumPendingMulti(sseq uint64, sl *gsl.SimpleSublist, lastPerSubject bool) (total, validThrough uint64, err error) { fs.mu.RLock() defer fs.mu.RUnlock() @@ -3792,7 +3949,7 @@ func (fs *fileStore) NumPendingMulti(sseq uint64, sl *gsl.SimpleSublist, lastPer validThrough = fs.state.LastSeq if fs.state.Msgs == 0 || sseq > fs.state.LastSeq { - return 0, validThrough + return 0, validThrough, nil } // If sseq is less then our first set to first. @@ -3821,9 +3978,9 @@ func (fs *fileStore) NumPendingMulti(sseq uint64, sl *gsl.SimpleSublist, lastPer // If we are isAll and have no deleted we can do a simpler calculation. if !lastPerSubject && isAll && (fs.state.LastSeq-fs.state.FirstSeq+1) == fs.state.Msgs { if sseq == 0 { - return fs.state.Msgs, validThrough + return fs.state.Msgs, validThrough, nil } - return fs.state.LastSeq - sseq + 1, validThrough + return fs.state.LastSeq - sseq + 1, validThrough, nil } // Setup the isMatch function. 
isMatch := func(subj string) bool { @@ -3841,7 +3998,7 @@ func (fs *fileStore) NumPendingMulti(sseq uint64, sl *gsl.SimpleSublist, lastPer if lastPerSubject { // If we want all and our start sequence is equal or less than first return number of subjects. if isAll && sseq <= fs.state.FirstSeq { - return uint64(fs.psim.Size()), validThrough + return uint64(fs.psim.Size()), validThrough, nil } // If we are here we need to scan. We are going to scan the PSIM looking for lblks that are >= seqStart. // This will build up a list of all subjects from the selected block onward. @@ -3874,7 +4031,10 @@ func (fs *fileStore) NumPendingMulti(sseq uint64, sl *gsl.SimpleSublist, lastPer // We need to discount the total by subjects seen before sseq, but also add them right back in if they are >= sseq for this blk. // This only should be subjects we know have the last blk in this block. if mb.cacheNotLoaded() { - mb.loadMsgsWithLock() + if err = mb.loadMsgsWithLock(); err != nil { + mb.mu.Unlock() + return 0, 0, err + } shouldExpire = true } var smv StoreMsg @@ -3915,7 +4075,7 @@ func (fs *fileStore) NumPendingMulti(sseq uint64, sl *gsl.SimpleSublist, lastPer mb.llts = ats.AccessTime() } mb.mu.Unlock() - return total, validThrough + return total, validThrough, nil } // If we would need to scan more from the beginning, revert back to calculating directly here. @@ -3933,7 +4093,10 @@ func (fs *fileStore) NumPendingMulti(sseq uint64, sl *gsl.SimpleSublist, lastPer // If we are here we need to at least scan the subject fss. // Make sure we have fss loaded. if mb.fssNotLoaded() { - mb.loadMsgsWithLock() + if err = mb.loadMsgsWithLock(); err != nil { + mb.mu.Unlock() + return 0, 0, err + } shouldExpire = true } // Mark fss activity. @@ -3963,7 +4126,10 @@ func (fs *fileStore) NumPendingMulti(sseq uint64, sl *gsl.SimpleSublist, lastPer if havePartial { // Make sure we have the cache loaded. 
if mb.cacheNotLoaded() { - mb.loadMsgsWithLock() + if err = mb.loadMsgsWithLock(); err != nil { + mb.mu.Unlock() + return 0, 0, err + } shouldExpire = true } // Clear on partial. @@ -3997,7 +4163,7 @@ func (fs *fileStore) NumPendingMulti(sseq uint64, sl *gsl.SimpleSublist, lastPer mb.mu.Unlock() total += t } - return total, validThrough + return total, validThrough, nil } // If we are here it's better to calculate totals from psim and adjust downward by scanning less blocks. @@ -4012,7 +4178,7 @@ func (fs *fileStore) NumPendingMulti(sseq uint64, sl *gsl.SimpleSublist, lastPer // See if we were asked for all, if so we are done. if sseq <= fs.state.FirstSeq { - return total, validThrough + return total, validThrough, nil } // If we are here we need to calculate partials for the first blocks. @@ -4045,7 +4211,10 @@ func (fs *fileStore) NumPendingMulti(sseq uint64, sl *gsl.SimpleSublist, lastPer // We need to adjust for all matches in this block. // Make sure we have fss loaded. This loads whole block now. if mb.fssNotLoaded() { - mb.loadMsgsWithLock() + if err = mb.loadMsgsWithLock(); err != nil { + mb.mu.Unlock() + return 0, 0, err + } shouldExpire = true } // Mark fss activity. @@ -4057,7 +4226,10 @@ func (fs *fileStore) NumPendingMulti(sseq uint64, sl *gsl.SimpleSublist, lastPer } else { // This is the last block. We need to scan per message here. if mb.cacheNotLoaded() { - mb.loadMsgsWithLock() + if err = mb.loadMsgsWithLock(); err != nil { + mb.mu.Unlock() + return 0, 0, err + } shouldExpire = true } var last = atomic.LoadUint64(&mb.last.seq) @@ -4097,7 +4269,7 @@ func (fs *fileStore) NumPendingMulti(sseq uint64, sl *gsl.SimpleSublist, lastPer // Make final adjustment. total -= adjust - return total, validThrough + return total, validThrough, nil } // SubjectsTotals return message totals per subject. @@ -4251,7 +4423,7 @@ func (fs *fileStore) newMsgBlockForWrite() (*msgBlock, error) { // Now do local hash. 
key := sha256.Sum256(fs.hashKeyForBlock(index)) - hh, err := highwayhash.New64(key[:]) + hh, err := highwayhash.NewDigest64(key[:]) if err != nil { return nil, fmt.Errorf("could not create hash: %v", err) } @@ -4522,26 +4694,15 @@ func (mb *msgBlock) skipMsg(seq uint64, now int64) { atomic.StoreUint64(&mb.first.seq, seq+1) mb.first.ts = 0 needsRecord = mb == mb.fs.lmb - if needsRecord && mb.rbytes > 0 { - // We want to make sure since we have no messages - // that we write to the beginning since we only need last one. - mb.rbytes, mb.cache = 0, &cache{} - mb.ecache.Set(mb.cache) - // If encrypted we need to reset counter since we just keep one. - if mb.bek != nil { - // Recreate to reset counter. - mb.bek, _ = genBlockEncryptionKey(mb.fs.fcfg.Cipher, mb.seed, mb.nonce) - } - } } else { needsRecord = true mb.dmap.Insert(seq) } - mb.mu.Unlock() - if needsRecord { - mb.writeMsgRecord(emptyRecordLen, seq|ebit, _EMPTY_, nil, nil, now, true) - } else { + mb.writeMsgRecordLocked(emptyRecordLen, seq|ebit, _EMPTY_, nil, nil, now, true, true) + } + mb.mu.Unlock() + if !needsRecord { mb.kickFlusher() } } @@ -4629,10 +4790,9 @@ func (fs *fileStore) SkipMsgs(seq uint64, num uint64) error { mb.dmap.Insert(seq) } } - mb.mu.Unlock() - // Write out our placeholder. - mb.writeMsgRecord(emptyRecordLen, lseq|ebit, _EMPTY_, nil, nil, now, true) + mb.writeMsgRecordLocked(emptyRecordLen, lseq|ebit, _EMPTY_, nil, nil, now, true, true) + mb.mu.Unlock() // Now update FS accounting. // Update fs state. @@ -4756,6 +4916,17 @@ func (fs *fileStore) enforceMsgLimit() { return } for nmsgs := fs.state.Msgs; nmsgs > uint64(fs.cfg.MaxMsgs); nmsgs = fs.state.Msgs { + // If the first block can be removed fully, purge it entirely without needing to walk sequences. 
+ if len(fs.blks) > 0 { + fmb := fs.blks[0] + fmb.mu.RLock() + msgs := fmb.msgs + fmb.mu.RUnlock() + if nmsgs-msgs > uint64(fs.cfg.MaxMsgs) { + fs.purgeMsgBlock(fmb) + continue + } + } if removed, err := fs.deleteFirstMsg(); err != nil || !removed { fs.rebuildFirst() return @@ -4773,6 +4944,17 @@ func (fs *fileStore) enforceBytesLimit() { return } for bs := fs.state.Bytes; bs > uint64(fs.cfg.MaxBytes); bs = fs.state.Bytes { + // If the first block can be removed fully, purge it entirely without needing to walk sequences. + if len(fs.blks) > 0 { + fmb := fs.blks[0] + fmb.mu.RLock() + bytes := fmb.bytes + fmb.mu.RUnlock() + if bs-bytes > uint64(fs.cfg.MaxBytes) { + fs.purgeMsgBlock(fmb) + continue + } + } if removed, err := fs.deleteFirstMsg(); err != nil || !removed { fs.rebuildFirst() return @@ -4981,10 +5163,6 @@ func (fs *fileStore) removeMsg(seq uint64, secure, viaLimits, needFSLock bool) ( fsUnlock() return false, ErrStoreClosed } - if !viaLimits && fs.sips > 0 { - fsUnlock() - return false, ErrStoreSnapshotInProgress - } // If in encrypted mode negate secure rewrite here. if secure && fs.prf != nil { secure = false @@ -5009,21 +5187,42 @@ func (fs *fileStore) removeMsg(seq uint64, secure, viaLimits, needFSLock bool) ( return false, nil } + fifo := seq == atomic.LoadUint64(&mb.first.seq) + isLastBlock := mb == fs.lmb + isEmpty := mb.msgs == 1 // ... about to be zero though. + // We used to not have to load in the messages except with callbacks or the filtered subject state (which is now always on). // Now just load regardless. // TODO(dlc) - Figure out a way not to have to load it in, we need subject tracking outside main data block. 
+ var didLoad bool if mb.cacheNotLoaded() { if err := mb.loadMsgsWithLock(); err != nil { mb.mu.Unlock() fsUnlock() return false, err } + didLoad = true + } + finishedWithCache := func() { + if didLoad { + mb.finishedWithCache() + } } var smv StoreMsg - sm, err := mb.cacheLookupNoCopy(seq, &smv) + var sm *StoreMsg + var err error + if secure { + // For a secure erase we can't use NoCopy, as eraseMsg will overwrite the + // cache and we won't be able to access sm.subj etc anymore later on. + sm, err = mb.cacheLookup(seq, &smv) + } else { + // For a non-secure erase it's fine to use NoCopy, as the cache won't change + // from underneath us. + sm, err = mb.cacheLookupNoCopy(seq, &smv) + } if err != nil { - mb.finishedWithCache() + finishedWithCache() mb.mu.Unlock() fsUnlock() // Mimic err behavior from above check to dmap. No error returned if already removed. @@ -5032,12 +5231,51 @@ func (fs *fileStore) removeMsg(seq uint64, secure, viaLimits, needFSLock bool) ( } return false, err } + + // Check if we need to write a deleted record tombstone. + // This is for user initiated removes or to hold the first seq + // when the last block is empty. + // If not via limits and not empty (empty writes tombstone below if last) write tombstone. + if !viaLimits && !isEmpty && sm != nil { + mb.mu.Unlock() // Only safe way to checkLastBlock is to unlock here... + lmb, err := fs.checkLastBlock(emptyRecordLen) + if err != nil { + finishedWithCache() + fsUnlock() + return false, err + } + if err := lmb.writeTombstone(sm.seq, sm.ts); err != nil { + finishedWithCache() + fsUnlock() + return false, err + } + mb.mu.Lock() // We'll need the lock back to carry on safely. + } + // Grab size msz := fileStoreMsgSize(sm.subj, sm.hdr, sm.msg) // Set cache timestamp for last remove. mb.lrts = ats.AccessTime() + // Must always perform the erase, even if the block is empty as it could contain tombstones. + if secure { + // Grab record info, but use the pre-computed record length. 
+ ri, _, _, err := mb.slotInfo(int(seq - mb.cache.fseq)) + if err != nil { + finishedWithCache() + mb.mu.Unlock() + fsUnlock() + return false, err + } + if err := mb.eraseMsg(seq, int(ri), int(msz), isLastBlock); err != nil { + finishedWithCache() + mb.mu.Unlock() + fsUnlock() + return false, err + } + } + // Global stats if fs.state.Msgs > 0 { fs.state.Msgs-- @@ -5077,28 +5315,6 @@ func (fs *fileStore) removeMsg(seq uint64, secure, viaLimits, needFSLock bool) ( } } - fifo := seq == atomic.LoadUint64(&mb.first.seq) - isLastBlock := mb == fs.lmb - isEmpty := mb.msgs == 0 - - // If erase but block is empty, we can simply remove the block later. - if secure && !isEmpty { - // Grab record info, but use the pre-computed record length. - ri, _, _, err := mb.slotInfo(int(seq - mb.cache.fseq)) - if err != nil { - mb.finishedWithCache() - mb.mu.Unlock() - fsUnlock() - return false, err - } - if err := mb.eraseMsg(seq, int(ri), int(msz), isLastBlock); err != nil { - mb.finishedWithCache() - mb.mu.Unlock() - fsUnlock() - return false, err - } - } - if fifo { mb.selectNextFirst() if !isEmpty { @@ -5135,11 +5351,11 @@ func (fs *fileStore) removeMsg(seq uint64, secure, viaLimits, needFSLock bool) ( // We will write a tombstone at the end. var firstSeqNeedsUpdate bool if isEmpty { - // This writes tombstone iff mb == lmb, so no need to do below. + // This writes tombstone iff mb == lmb, so no need to do above. fs.removeMsgBlock(mb) firstSeqNeedsUpdate = seq == fs.state.FirstSeq } - mb.finishedWithCache() + finishedWithCache() mb.mu.Unlock() // If we emptied the current message block and the seq was state.FirstSeq @@ -5149,15 +5365,6 @@ func (fs *fileStore) removeMsg(seq uint64, secure, viaLimits, needFSLock bool) ( fs.selectNextFirst() } - // Check if we need to write a deleted record tombstone. - // This is for user initiated removes or to hold the first seq - // when the last block is empty. 
- - // If not via limits and not empty (empty writes tombstone above if last) write tombstone. - if !viaLimits && !isEmpty && sm != nil { - fs.writeTombstone(sm.seq, sm.ts) - } - if cb := fs.scb; cb != nil { // If we have a callback registered we need to release lock regardless since cb might need it to lookup msg, etc. fs.mu.Unlock() @@ -5209,11 +5416,11 @@ func (mb *msgBlock) compact() { // writing new messages. We will silently bail on any issues with the underlying block and let someone else detect. // if fseq > 0 we will attempt to cleanup stale tombstones. // Write lock needs to be held. -func (mb *msgBlock) compactWithFloor(floor uint64) { +func (mb *msgBlock) compactWithFloor(floor uint64) error { wasLoaded := mb.cache != nil && mb.cacheAlreadyLoaded() if !wasLoaded { if err := mb.loadMsgsWithLock(); err != nil { - return + return err } } defer mb.finishedWithCache() @@ -5225,15 +5432,18 @@ func (mb *msgBlock) compactWithFloor(floor uint64) { var le = binary.LittleEndian var firstSet bool + var last uint64 + var msgs uint64 fseq := atomic.LoadUint64(&mb.first.seq) + lseq := atomic.LoadUint64(&mb.last.seq) isDeleted := func(seq uint64) bool { return seq == 0 || seq&ebit != 0 || mb.dmap.Exists(seq) || seq < fseq } for index, lbuf := uint32(0), uint32(len(buf)); index < lbuf; { if index+msgHdrSize > lbuf { - return + return fmt.Errorf("message overrun") } hdr := buf[index : index+msgHdrSize] rl, slen := le.Uint32(hdr[0:]), int(le.Uint16(hdr[20:])) @@ -5242,10 +5452,11 @@ func (mb *msgBlock) compactWithFloor(floor uint64) { dlen := int(rl) - msgHdrSize // Do some quick sanity checks here. if dlen < 0 || slen > (dlen-recordHashSize) || dlen > int(rl) || index+rl > lbuf || rl > rlBadThresh { - return + return fmt.Errorf("sanity check failed") } // Only need to process non-deleted messages. seq := le.Uint64(hdr[4:]) + ts := int64(le.Uint64(hdr[12:])) if !isDeleted(seq) { // Check for tombstones. 
@@ -5259,11 +5470,17 @@ func (mb *msgBlock) compactWithFloor(floor uint64) { } } else { // Normal message here. + msgs++ nbuf = append(nbuf, buf[index:index+rl]...) if !firstSet { firstSet = true atomic.StoreUint64(&mb.first.seq, seq) } + if seq >= last { + last = seq + atomic.StoreUint64(&mb.last.seq, last) + mb.last.ts = ts + } } } // Advance to next record. @@ -5274,7 +5491,7 @@ func (mb *msgBlock) compactWithFloor(floor uint64) { if mb.cmp != NoCompression && len(nbuf) > 0 { cbuf, err := mb.cmp.Compress(nbuf) if err != nil { - return + return err } meta := &CompressionInfo{ Algorithm: mb.cmp, @@ -5288,7 +5505,7 @@ func (mb *msgBlock) compactWithFloor(floor uint64) { // Recreate to reset counter. rbek, err := genBlockEncryptionKey(mb.fs.fcfg.Cipher, mb.seed, mb.nonce) if err != nil { - return + return err } rbek.XORKeyStream(nbuf, nbuf) } @@ -5303,11 +5520,11 @@ func (mb *msgBlock) compactWithFloor(floor uint64) { dios <- struct{}{} if err != nil { os.Remove(mfn) - return + return err } if err := os.Rename(mfn, mb.mfn); err != nil { os.Remove(mfn) - return + return err } // Make sure to sync @@ -5326,12 +5543,26 @@ func (mb *msgBlock) compactWithFloor(floor uint64) { for seq, nfseq := fseq, atomic.LoadUint64(&mb.first.seq); seq < nfseq; seq++ { mb.dmap.Delete(seq) } + // Remove any seqs from the ending of the blk. + for seq, nlseq := lseq, atomic.LoadUint64(&mb.last.seq); seq > nlseq; seq-- { + mb.dmap.Delete(seq) + } + // If the block itself has no messages anymore (could still contain tombstones though), + // then we need to account for that by resetting the last sequence and timestamp. + if msgs == 0 { + atomic.StoreUint64(&mb.last.seq, fseq-1) + mb.last.ts = 0 + mb.dmap.Empty() + } // Make sure we clear the cache since no longer valid. mb.clearCacheAndOffset() // If we entered with the msgs loaded make sure to reload them. 
if wasLoaded { - mb.loadMsgsWithLock() + if err := mb.loadMsgsWithLock(); err != nil { + return err + } } + return nil } // Grab info from a slot. @@ -5646,9 +5877,7 @@ func (mb *msgBlock) truncate(tseq uint64, ts int64) (nmsgs, nbytes uint64, err e mb.resetPerSubjectInfo() // Load msgs again. - mb.loadMsgsWithLock() - - return purged, bytes, nil + return purged, bytes, mb.loadMsgsWithLock() } // Helper to determine if the mb is empty. @@ -6373,16 +6602,30 @@ func (mb *msgBlock) writeMsgRecordLocked(rl, seq uint64, subj string, mhdr, msg // Only update index and do accounting if not a delete tombstone. if seq&tbit == 0 { + last := atomic.LoadUint64(&mb.last.seq) // Accounting, do this before stripping ebit, it is ebit aware. mb.updateAccounting(seq, ts, rl) // Strip ebit if set. seq = seq &^ ebit - if mb.cache.fseq == 0 { - mb.cache.fseq = seq + // If we have a hole due to skipping many messages, fill it. + if len(mb.cache.idx) > 0 && last+1 < seq { + for dseq := last + 1; dseq < seq; dseq++ { + mb.cache.idx = append(mb.cache.idx, dbit) + } } // Write index - mb.cache.idx = append(mb.cache.idx, uint32(index)|cbit) + if mb.cache.idx = append(mb.cache.idx, uint32(index)|cbit); len(mb.cache.idx) == 1 { + mb.cache.fseq = seq + } } else { + // If the block is empty, still adjust the accounting accordingly. + tseq := seq &^ tbit + if mb.msgs == 0 && tseq > atomic.LoadUint64(&mb.last.seq) { + atomic.StoreUint64(&mb.last.seq, tseq) + mb.last.ts = ts + atomic.StoreUint64(&mb.first.seq, tseq+1) + mb.first.ts = 0 + } // Make sure to account for tombstones in rbytes. mb.rbytes += rl } @@ -6753,8 +6996,7 @@ func (mb *msgBlock) ensureRawBytesLoaded() error { // Sync msg and index files as needed. This is called from a timer. func (fs *fileStore) syncBlocks() { fs.mu.Lock() - // If closed or a snapshot is in progress bail. 
- if fs.closed || fs.sips > 0 { + if fs.closed { fs.mu.Unlock() return } @@ -6773,10 +7015,9 @@ func (fs *fileStore) syncBlocks() { continue } // See if we can close FDs due to being idle. - if mb.mfd != nil && mb.sinceLastWriteActivity() > closeFDsIdle { + if mb.mfd != nil && mb.sinceLastWriteActivity() > closeFDsIdle && mb.pendingWriteSizeLocked() == 0 { mb.dirtyCloseWithRemove(false) } - // If our first has moved and we are set to noCompact (which is from tombstones), // clear so that we might cleanup tombstones. if firstMoved && mb.noCompact { @@ -6790,12 +7031,10 @@ func (fs *fileStore) syncBlocks() { markDirty = true } + // Flush anything that may be pending. + mb.flushPendingMsgsLocked() // Check if we need to sync. We will not hold lock during actual sync. needSync := mb.needSync - if needSync { - // Flush anything that may be pending. - mb.flushPendingMsgsLocked() - } mb.mu.Unlock() // Check if we should compact here. @@ -6966,7 +7205,6 @@ func (mb *msgBlock) indexCacheBuf(buf []byte) error { mbFirstSeq := atomic.LoadUint64(&mb.first.seq) mbLastSeq := atomic.LoadUint64(&mb.last.seq) - fseq := mbFirstSeq // Sanity check here since we calculate size to allocate based on this. if mbFirstSeq > (mbLastSeq + 1) { // Purged state first == last + 1 @@ -6975,8 +7213,6 @@ func (mb *msgBlock) indexCacheBuf(buf []byte) error { return errCorruptState } - // Capture beginning size of dmap. - dms := uint64(mb.dmap.Size()) idxSz := mbLastSeq - mbFirstSeq + 1 if mb.cache == nil { @@ -7017,7 +7253,9 @@ func (mb *msgBlock) indexCacheBuf(buf []byte) error { var seq, ttls, schedules uint64 var sm StoreMsg // Used for finding TTL headers - // To ensure the sequence keeps moving up. + // To ensure the sequence keeps moving up. As well as confirming our index + // is aligned with the mb's first and last sequence. 
+ var first uint64 var last uint64 for index < lbuf { @@ -7058,31 +7296,36 @@ func (mb *msgBlock) indexCacheBuf(buf []byte) error { index += rl continue } - last = seq - // We defer checksum checks to individual msg cache lookups to amortorize costs and // not introduce latency for first message from a newly loaded block. if seq >= mbFirstSeq { + last = seq + + // If the first sequence doesn't align with what we had in-memory, we need to rebuild. + if first == 0 { + first = seq + if mbFirstSeq != first { + return errCorruptState + } + } + // Track that we do not have holes. if slot := int(seq - mbFirstSeq); slot != len(idx) { // If we have a hole fill it. for dseq := mbFirstSeq + uint64(len(idx)); dseq < seq; dseq++ { idx = append(idx, dbit) - if dms == 0 && dseq != 0 { + if dseq != 0 { mb.dmap.Insert(dseq) } } } // Add to our index. idx = append(idx, index) - // Adjust if we guessed wrong. - if seq != 0 && seq < fseq { - fseq = seq - } // Make sure our dmap has this entry if it was erased. - if erased && dms == 0 && seq != 0 { - mb.dmap.Insert(seq) + // If not, that means this erased message was not accounted for in our in-memory state. + if erased && seq != 0 && !mb.dmap.Exists(seq) { + return errCorruptState } // Handle FSS inline here. @@ -7117,22 +7360,15 @@ func (mb *msgBlock) indexCacheBuf(buf []byte) error { index += rl } - // Track holes at the end of the block, these would be missed in the - // earlier loop if we've ran out of block file to look at, but should - // be easily noticed because the seq will be below the last seq from - // the index. - if seq > 0 && seq < mbLastSeq { - for dseq := seq; dseq < mbLastSeq; dseq++ { - idx = append(idx, dbit) - if dms == 0 { - mb.dmap.Insert(dseq) - } - } + // If we ended up with a smaller or larger index, or the first/last sequence + // doesn't align with what we had in-memory, we need to rebuild. 
+ if len(idx) != int(idxSz) || (first > 0 && mbFirstSeq != first) || (last > 0 && mbLastSeq != last) { + return errCorruptState } mb.cache.buf = buf mb.cache.idx = idx - mb.cache.fseq = fseq + mb.cache.fseq = mbFirstSeq mb.cache.wp = int(lbuf) mb.ttls = ttls mb.schedules = schedules @@ -7177,8 +7413,10 @@ func (mb *msgBlock) flushPendingMsgsLocked() (*LostStreamData, error) { // Signals us that we need to rebuild filestore state. var fsLostData *LostStreamData + var weakenCache bool if mb.cache == nil { mb.cache = mb.ecache.Value() + weakenCache = mb.cache != nil } if mb.cache == nil || mb.mfd == nil { return nil, errNoCache @@ -7261,8 +7499,10 @@ func (mb *msgBlock) flushPendingMsgsLocked() (*LostStreamData, error) { // Check last access time. If we think the block still has read interest // then we will weaken the pointer but otherwise try to hold onto it. if ts := ats.AccessTime(); ts < mb.llts || (ts-mb.llts) <= int64(mb.cexp) { - mb.cache = nil - mb.ecache.Weaken() + if weakenCache { + mb.cache = nil + mb.ecache.Weaken() + } mb.resetCacheExpireTimer(0) return fsLostData, mb.werr } @@ -7440,11 +7680,12 @@ checkCache: if err := mb.indexCacheBuf(buf); err != nil { if err == errCorruptState { var ld *LostStreamData - if ld, _, err = mb.rebuildStateLocked(); ld != nil { - // We do not know if fs is locked or not at this point. - // This should be an exceptional condition so do so in Go routine. - go mb.fs.rebuildState(ld) - } + ld, _, err = mb.rebuildStateLocked() + // We do not know if fs is locked or not at this point. + // This should be an exceptional condition so do so in Go routine. + // Always rebuild the filestore's state if indexing fails, even if no data was lost, + // our in-memory state was stale in that case. 
+ go mb.fs.rebuildState(ld) } if err != nil { return err @@ -7617,7 +7858,10 @@ func (mb *msgBlock) cacheLookupEx(seq uint64, sm *StoreMsg, doCopy bool) (*Store } else { reason = "cache buf empty" } - mb.fs.warn("Cache lookup detected no cache: %s", reason) + mb.fs.warn("Cache lookup for sequence %d in block %d detected no cache: %s", seq, mb.index, reason) + if mb.cache != nil { + mb.tryForceExpireCacheLocked() + } return nil, errNoCache } // Check partial cache status. @@ -7642,7 +7886,7 @@ func (mb *msgBlock) cacheLookupEx(seq uint64, sm *StoreMsg, doCopy bool) (*Store buf := mb.cache.buf[li:] // We use the high bit to denote we have already checked the checksum. - var hh hash.Hash64 + var hh *highwayhash.Digest64 if !hashChecked { hh = mb.hh // This will force the hash check in msgFromBuf. } @@ -7741,7 +7985,7 @@ func (fs *fileStore) msgForSeqLocked(seq uint64, sm *StoreMsg, needFSLock bool) // Internal function to return msg parts from a raw buffer. // Raw buffer will be copied into sm. // Lock should be held. -func (mb *msgBlock) msgFromBuf(buf []byte, sm *StoreMsg, hh hash.Hash64) (*StoreMsg, error) { +func (mb *msgBlock) msgFromBuf(buf []byte, sm *StoreMsg, hh *highwayhash.Digest64) (*StoreMsg, error) { return mb.msgFromBufEx(buf, sm, hh, true) } @@ -7749,14 +7993,14 @@ func (mb *msgBlock) msgFromBuf(buf []byte, sm *StoreMsg, hh hash.Hash64) (*Store // Raw buffer will NOT be copied into sm. // Only use for internal use, any message that is passed to upper layers should use mb.msgFromBuf. // Lock should be held. -func (mb *msgBlock) msgFromBufNoCopy(buf []byte, sm *StoreMsg, hh hash.Hash64) (*StoreMsg, error) { +func (mb *msgBlock) msgFromBufNoCopy(buf []byte, sm *StoreMsg, hh *highwayhash.Digest64) (*StoreMsg, error) { return mb.msgFromBufEx(buf, sm, hh, false) } // Internal function to return msg parts from a raw buffer. // copy boolean will determine if we make a copy or not. // Lock should be held. 
-func (mb *msgBlock) msgFromBufEx(buf []byte, sm *StoreMsg, hh hash.Hash64, doCopy bool) (*StoreMsg, error) { +func (mb *msgBlock) msgFromBufEx(buf []byte, sm *StoreMsg, hh *highwayhash.Digest64, doCopy bool) (*StoreMsg, error) { if len(buf) < emptyRecordLen { return nil, errBadMsg{mb.mfn, "record too short"} } @@ -8143,6 +8387,44 @@ func (fs *fileStore) LoadPrevMsg(start uint64, smp *StoreMsg) (sm *StoreMsg, err return nil, ErrStoreEOF } +// LoadPrevMsgMulti will find the previous message matching any entry in the sublist. +func (fs *fileStore) LoadPrevMsgMulti(sl *gsl.SimpleSublist, start uint64, smp *StoreMsg) (sm *StoreMsg, skip uint64, err error) { + if sl == nil { + sm, err = fs.LoadPrevMsg(start, smp) + return + } + fs.mu.RLock() + defer fs.mu.RUnlock() + + if fs.closed { + return nil, 0, ErrStoreClosed + } + if fs.state.Msgs == 0 || start < fs.state.FirstSeq { + return nil, fs.state.FirstSeq, ErrStoreEOF + } + if start > fs.state.LastSeq { + start = fs.state.LastSeq + } + + if bi, _ := fs.selectMsgBlockWithIndex(start); bi >= 0 { + for i := bi; i >= 0; i-- { + mb := fs.blks[i] + if sm, expireOk, err := mb.prevMatchingMulti(sl, start, smp); err == nil { + if expireOk { + mb.tryForceExpireCache() + } + return sm, sm.seq, nil + } else if err != ErrStoreMsgNotFound { + return nil, 0, err + } else if expireOk { + mb.tryForceExpireCache() + } + } + } + + return nil, fs.state.FirstSeq, ErrStoreEOF +} + // Type returns the type of the underlying store. 
func (fs *fileStore) Type() StorageType { return FileStorage @@ -8533,7 +8815,10 @@ func (fs *fileStore) PurgeEx(subject string, sequence, keep uint64) (purged uint } if mb.cacheNotLoaded() { - mb.loadMsgsWithLock() + if err := mb.loadMsgsWithLock(); err != nil { + mb.mu.Unlock() + return 0, err + } shouldExpire = true } @@ -8739,7 +9024,7 @@ func (fs *fileStore) purge(fseq uint64) (uint64, error) { atomic.StoreUint64(&lmb.last.seq, fs.state.LastSeq) lmb.last.ts = fs.state.LastTime.UnixNano() - if lseq := atomic.LoadUint64(&lmb.last.seq); lseq > 1 { + if lseq := atomic.LoadUint64(&lmb.last.seq); lseq > 0 { // Leave a tombstone so we can remember our starting sequence in case // full state becomes corrupted. fs.writeTombstone(lseq, lmb.last.ts) @@ -8912,12 +9197,21 @@ func (fs *fileStore) compact(seq uint64) (uint64, error) { if nbuf, err = smb.cmp.Compress(nbuf); err != nil { goto SKIP } + + // We will write to a new file and mv/rename it in case of failure. + mfn := filepath.Join(smb.fs.fcfg.StoreDir, msgDir, fmt.Sprintf(newScan, smb.index)) <-dios - err = os.WriteFile(smb.mfn, nbuf, defaultFilePerms) + err := os.WriteFile(mfn, nbuf, defaultFilePerms) dios <- struct{}{} if err != nil { + os.Remove(mfn) goto SKIP } + if err := os.Rename(mfn, smb.mfn); err != nil { + os.Remove(mfn) + goto SKIP + } + // Make sure to remove fss state. smb.fss = nil smb.clearCacheAndOffset() @@ -9001,10 +9295,6 @@ func (fs *fileStore) reset() error { fs.mu.Unlock() return ErrStoreClosed } - if fs.sips > 0 { - fs.mu.Unlock() - return ErrStoreSnapshotInProgress - } var purged, bytes uint64 cb := fs.scb @@ -9102,10 +9392,6 @@ func (fs *fileStore) Truncate(seq uint64) error { fs.mu.Unlock() return ErrStoreClosed } - if fs.sips > 0 { - fs.mu.Unlock() - return ErrStoreSnapshotInProgress - } // Any existing state file will no longer be applicable. We will force write a new one // at the end, after we release the lock. 
@@ -9347,6 +9633,57 @@ func (fs *fileStore) forceRemoveMsgBlock(mb *msgBlock) { fs.removeMsgBlockFromList(mb) } +// Purges and removes the msgBlock from the store. +// Lock should be held. +func (fs *fileStore) purgeMsgBlock(mb *msgBlock) { + mb.mu.Lock() + // Adjust per-subject tracking if present. + if err := mb.ensurePerSubjectInfoLoaded(); err == nil && mb.fss != nil { + mb.fss.IterFast(func(bsubj []byte, ss *SimpleState) bool { + subj := bytesToString(bsubj) + for range ss.Msgs { + fs.removePerSubject(subj) + } + return true + }) + } + // Clean up scheduled message metadata if we know this block contained any. + if fs.scheduling != nil && mb.schedules > 0 { + cacheLoaded := !mb.cacheNotLoaded() + if !cacheLoaded { + cacheLoaded = mb.loadMsgsWithLock() == nil + } + if cacheLoaded { + var smv StoreMsg + fseq, lseq := atomic.LoadUint64(&mb.first.seq), atomic.LoadUint64(&mb.last.seq) + for seq := fseq; seq <= lseq; seq++ { + sm, err := mb.cacheLookupNoCopy(seq, &smv) + if err != nil || sm == nil { + continue + } + if schedule, ok := getMessageSchedule(sm.hdr); ok && !schedule.IsZero() { + fs.scheduling.remove(seq) + } + } + } + } + // Update top level accounting. + msgs, bytes := mb.msgs, mb.bytes + if msgs > fs.state.Msgs { + msgs = fs.state.Msgs + } + if bytes > fs.state.Bytes { + bytes = fs.state.Bytes + } + fs.state.Msgs -= msgs + fs.state.Bytes -= bytes + fs.removeMsgBlock(mb) + mb.tryForceExpireCacheLocked() + mb.finishedWithCache() + mb.mu.Unlock() + fs.selectNextFirst() +} + // Called by purge to simply get rid of the cache and close our fds. // Lock should not be held. 
func (mb *msgBlock) dirtyClose() { @@ -9446,8 +9783,8 @@ func (mb *msgBlock) recalculateForSubj(subj string, ss *SimpleState) { if err := mb.loadMsgsWithLock(); err != nil { return } + defer mb.finishedWithCache() } - defer mb.finishedWithCache() startSlot := int(ss.First - mb.cache.fseq) if startSlot < 0 { @@ -9583,8 +9920,8 @@ func (mb *msgBlock) generatePerSubjectInfo() error { if mb.fss != nil { return nil } + defer mb.finishedWithCache() } - defer mb.finishedWithCache() // Create new one regardless. mb.fss = mb.fss.Empty() @@ -9872,12 +10209,17 @@ func timestampNormalized(t time.Time) int64 { // writeFullState will proceed to write the full meta state iff not complex and time consuming. // Since this is for quick recovery it is optional and should not block/stall normal operations. func (fs *fileStore) writeFullState() error { - return fs._writeFullState(false) + return fs._writeFullState(false, true) } -// forceWriteFullState will proceed to write the full meta state. This should only be called by stop() +// forceWriteFullState will proceed to write the full meta state. func (fs *fileStore) forceWriteFullState() error { - return fs._writeFullState(true) + return fs._writeFullState(true, true) +} + +// forceWriteFullStateLocked will proceed to write the full meta state. This should only be called by stop() +func (fs *fileStore) forceWriteFullStateLocked() error { + return fs._writeFullState(true, false) } // This will write the full binary state for the stream. @@ -9887,11 +10229,22 @@ func (fs *fileStore) forceWriteFullState() error { // 2. PSIM - Per Subject Index Map - Tracks first and last blocks with subjects present. // 3. MBs - Index, Bytes, First and Last Sequence and Timestamps, and the deleted map (avl.seqset). // 4. Last block index and hash of record inclusive to this stream state. 
-func (fs *fileStore) _writeFullState(force bool) error { +func (fs *fileStore) _writeFullState(force, needLock bool) error { + fsLock := func() { + if needLock { + fs.mu.Lock() + } + } + fsUnlock := func() { + if needLock { + fs.mu.Unlock() + } + } + start := time.Now() - fs.mu.Lock() + fsLock() if fs.closed || fs.dirty == 0 { - fs.mu.Unlock() + fsUnlock() return nil } @@ -9910,7 +10263,7 @@ func (fs *fileStore) _writeFullState(force bool) error { numDeleted = int((fs.state.LastSeq - fs.state.FirstSeq + 1) - fs.state.Msgs) } if numSubjects > numThreshold || numDeleted > numThreshold { - fs.mu.Unlock() + fsUnlock() return errStateTooBig } } @@ -10018,13 +10371,15 @@ func (fs *fileStore) _writeFullState(force bool) error { // Encrypt if needed. if fs.prf != nil { if err := fs.setupAEK(); err != nil { - fs.mu.Unlock() + fsUnlock() return err } nonce := make([]byte, fs.aek.NonceSize(), fs.aek.NonceSize()+len(buf)+fs.aek.Overhead()) if n, err := rand.Read(nonce); err != nil { + fsUnlock() return err } else if n != len(nonce) { + fsUnlock() return fmt.Errorf("not enough nonce bytes read (%d != %d)", n, len(nonce)) } buf = fs.aek.Seal(nonce, nonce, buf, nil) @@ -10041,13 +10396,17 @@ func (fs *fileStore) _writeFullState(force bool) error { statesEqual := trackingStatesEqual(&fs.state, &mstate) // Release lock. - fs.mu.Unlock() + fsUnlock() // Check consistency here. if !statesEqual { fs.warn("Stream state encountered internal inconsistency on write") // Rebuild our fs state from the mb state. - fs.rebuildState(nil) + if needLock { + fs.rebuildState(nil) + } else { + fs.rebuildStateLocked(nil) + } return errCorruptState } @@ -10072,14 +10431,14 @@ func (fs *fileStore) _writeFullState(force bool) error { // Update dirty if successful. if err == nil { - fs.mu.Lock() + fsLock() fs.dirty -= priorDirty - fs.mu.Unlock() + fsUnlock() } // Attempt to write both files, an error in one should not prevent the other from being written. 
- ttlErr := fs.writeTTLState() - schedErr := fs.writeMsgSchedulingState() + ttlErr := fs.writeTTLState(needLock) + schedErr := fs.writeMsgSchedulingState(needLock) if ttlErr != nil { return ttlErr } else if schedErr != nil { @@ -10088,30 +10447,42 @@ func (fs *fileStore) _writeFullState(force bool) error { return nil } -func (fs *fileStore) writeTTLState() error { - fs.mu.RLock() +func (fs *fileStore) writeTTLState(needLock bool) error { + if needLock { + fs.mu.RLock() + } if fs.ttls == nil { - fs.mu.RUnlock() + if needLock { + fs.mu.RUnlock() + } return nil } fn := filepath.Join(fs.fcfg.StoreDir, msgDir, ttlStreamStateFile) // Must be lseq+1 to identify up to which sequence the TTLs are valid. buf := fs.ttls.Encode(fs.state.LastSeq + 1) - fs.mu.RUnlock() + if needLock { + fs.mu.RUnlock() + } return fs.writeFileWithOptionalSync(fn, buf, defaultFilePerms) } -func (fs *fileStore) writeMsgSchedulingState() error { - fs.mu.RLock() +func (fs *fileStore) writeMsgSchedulingState(needLock bool) error { + if needLock { + fs.mu.RLock() + } if fs.scheduling == nil { - fs.mu.RUnlock() + if needLock { + fs.mu.RUnlock() + } return nil } fn := filepath.Join(fs.fcfg.StoreDir, msgDir, msgSchedulingStreamStateFile) // Must be lseq+1 to identify up to which sequence the schedules are valid. buf := fs.scheduling.encode(fs.state.LastSeq + 1) - fs.mu.RUnlock() + if needLock { + fs.mu.RUnlock() + } return fs.writeFileWithOptionalSync(fn, buf, defaultFilePerms) } @@ -10129,18 +10500,10 @@ func (fs *fileStore) stop(delete, writeState bool) error { return ErrStoreClosed } - // Mark as closing. Do before releasing the lock to writeFullState + // Mark as closing. Do before releasing the lock to wait on the state flush loop // so we don't end up with this function running more than once. fs.closing = true - if writeState { - fs.checkAndFlushLastBlock() - } - fs.closeAllMsgBlocks(false) - - fs.cancelSyncTimer() - fs.cancelAgeChk() - // Release the state flusher loop. 
if fs.qch != nil { close(fs.qch) @@ -10152,9 +10515,18 @@ func (fs *fileStore) stop(delete, writeState bool) error { fsld := fs.fsld fs.mu.Unlock() <-fsld - // Write full state if needed. If not dirty this is a no-op. - fs.forceWriteFullState() fs.mu.Lock() + + fs.checkAndFlushLastBlock() + } + fs.closeAllMsgBlocks(false) + + fs.cancelSyncTimer() + fs.cancelAgeChk() + + if writeState { + // Write full state if needed. If not dirty this is a no-op. + fs.forceWriteFullStateLocked() } // Mark as closed. Last message block needs to be cleared after @@ -10248,7 +10620,8 @@ func (fs *fileStore) streamSnapshot(w io.WriteCloser, includeConsumers bool, err hh := fs.hh hh.Reset() hh.Write(meta) - sum := []byte(hex.EncodeToString(fs.hh.Sum(nil))) + var hb [highwayhash.Size64]byte + sum := []byte(hex.EncodeToString(fs.hh.Sum(hb[:0]))) fs.mu.Unlock() // Meta first. @@ -10351,7 +10724,8 @@ func (fs *fileStore) streamSnapshot(w io.WriteCloser, includeConsumers bool, err } o.hh.Reset() o.hh.Write(meta) - sum := []byte(hex.EncodeToString(o.hh.Sum(nil))) + var hb [highwayhash.Size64]byte + sum := []byte(hex.EncodeToString(o.hh.Sum(hb[:0]))) // We can have the running state directly encoded now. 
state, err := o.encodeState() @@ -10568,7 +10942,7 @@ type consumerFileStore struct { name string odir string ifn string - hh hash.Hash64 + hh *highwayhash.Digest64 state ConsumerState fch chan struct{} qch chan struct{} @@ -10578,7 +10952,7 @@ type consumerFileStore struct { closed bool } -func (fs *fileStore) ConsumerStore(name string, cfg *ConsumerConfig) (ConsumerStore, error) { +func (fs *fileStore) ConsumerStore(name string, created time.Time, cfg *ConsumerConfig) (ConsumerStore, error) { if fs == nil { return nil, fmt.Errorf("filestore is nil") } @@ -10601,7 +10975,7 @@ func (fs *fileStore) ConsumerStore(name string, cfg *ConsumerConfig) (ConsumerSt if err := os.MkdirAll(odir, defaultDirPerms); err != nil { return nil, fmt.Errorf("could not create consumer directory - %v", err) } - csi := &FileConsumerInfo{Name: name, Created: time.Now().UTC(), ConsumerConfig: *cfg} + csi := &FileConsumerInfo{Name: name, Created: created, ConsumerConfig: *cfg} o := &consumerFileStore{ fs: fs, cfg: csi, @@ -10611,7 +10985,7 @@ func (fs *fileStore) ConsumerStore(name string, cfg *ConsumerConfig) (ConsumerSt ifn: filepath.Join(odir, consumerState), } key := sha256.Sum256([]byte(fs.cfg.Name + "/" + name)) - hh, err := highwayhash.New64(key[:]) + hh, err := highwayhash.NewDigest64(key[:]) if err != nil { return nil, fmt.Errorf("could not create hash: %v", err) } @@ -10658,7 +11032,6 @@ func (fs *fileStore) ConsumerStore(name string, cfg *ConsumerConfig) (ConsumerSt meta := filepath.Join(odir, JetStreamMetaFile) if _, err := os.Stat(meta); err != nil && os.IsNotExist(err) { didCreate = true - csi.Created = time.Now().UTC() if err := o.writeConsumerMeta(); err != nil { os.RemoveAll(odir) return nil, err @@ -11246,7 +11619,8 @@ func (cfs *consumerFileStore) writeConsumerMeta() error { } cfs.hh.Reset() cfs.hh.Write(b) - checksum := hex.EncodeToString(cfs.hh.Sum(nil)) + var hb [highwayhash.Size64]byte + checksum := hex.EncodeToString(cfs.hh.Sum(hb[:0])) sum := filepath.Join(cfs.odir, 
JetStreamMetaFileSum) err = cfs.fs.writeFileWithOptionalSync(sum, []byte(checksum), defaultFilePerms) @@ -11630,14 +12004,14 @@ func (fs *fileStore) RemoveConsumer(o ConsumerStore) error { // Deprecated: stream templates are deprecated and will be removed in a future version. type templateFileStore struct { dir string - hh hash.Hash64 + hh *highwayhash.Digest64 } // Deprecated: stream templates are deprecated and will be removed in a future version. func newTemplateFileStore(storeDir string) *templateFileStore { tdir := filepath.Join(storeDir, tmplsDir) key := sha256.Sum256([]byte("templates")) - hh, err := highwayhash.New64(key[:]) + hh, err := highwayhash.NewDigest64(key[:]) if err != nil { return nil } @@ -11666,7 +12040,8 @@ func (ts *templateFileStore) Store(t *streamTemplate) error { // FIXME(dlc) - Do checksum ts.hh.Reset() ts.hh.Write(b) - checksum := hex.EncodeToString(ts.hh.Sum(nil)) + var hb [highwayhash.Size64]byte + checksum := hex.EncodeToString(ts.hh.Sum(hb[:0])) sum := filepath.Join(dir, JetStreamMetaFileSum) if err := os.WriteFile(sum, []byte(checksum), defaultFilePerms); err != nil { return err diff --git a/vendor/github.com/nats-io/nats-server/v2/server/gateway.go b/vendor/github.com/nats-io/nats-server/v2/server/gateway.go index 085f0e18f54..f4982cc2ce6 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/gateway.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/gateway.go @@ -2561,11 +2561,18 @@ func (c *client) sendMsgToGateways(acc *Account, msg, subject, reply []byte, qgr return false } + // Copy off original pa in case it changes. + pa := c.pa + mt, _ := c.isMsgTraceEnabled() if mt != nil { - pa := c.pa + // We are going to replace "pa" with our copy of c.pa, but to restore + // to the original copy of c.pa, we need to save it again. + cpa := c.pa msg = mt.setOriginAccountHeaderIfNeeded(c, acc, msg) - defer func() { c.pa = pa }() + defer func() { c.pa = cpa }() + // Update pa with our current c.pa state. 
+ pa = c.pa } var ( @@ -2579,6 +2586,7 @@ func (c *client) sendMsgToGateways(acc *Account, msg, subject, reply []byte, qgr didDeliver bool prodIsMQTT = c.isMqtt() dlvMsgs int64 + dlvExtraSz int64 ) // Get a subscription from the pool @@ -2676,8 +2684,11 @@ func (c *client) sendMsgToGateways(acc *Account, msg, subject, reply []byte, qgr } } + // Assume original message + dmsg := msg if mt != nil { - msg = mt.setHopHeader(c, msg) + // If trace is enabled, we need to set the hop header per gateway. + dmsg = mt.setHopHeader(c, dmsg) } // Setup the message header. @@ -2727,16 +2738,22 @@ func (c *client) sendMsgToGateways(acc *Account, msg, subject, reply []byte, qgr sub.nm, sub.max = 0, 0 sub.client = gwc sub.subject = subject - if c.deliverMsg(prodIsMQTT, sub, acc, subject, mreply, mh, msg, false) { + if c.deliverMsg(prodIsMQTT, sub, acc, subject, mreply, mh, dmsg, false) { // We don't count internal deliveries so count only if sub.icb is nil if sub.icb == nil { dlvMsgs++ + dlvExtraSz += int64(len(dmsg) - len(msg)) } didDeliver = true } + + // If we set the header reset the origin pub args. + if mt != nil { + c.pa = pa + } } if dlvMsgs > 0 { - totalBytes := dlvMsgs * int64(len(msg)) + totalBytes := dlvMsgs*int64(len(msg)) + dlvExtraSz // For non MQTT producers, remove the CR_LF * number of messages if !prodIsMQTT { totalBytes -= dlvMsgs * int64(LEN_CR_LF) diff --git a/vendor/github.com/nats-io/nats-server/v2/server/gsl/gsl.go b/vendor/github.com/nats-io/nats-server/v2/server/gsl/gsl.go index 55f9bad98bd..88274dd234c 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/gsl/gsl.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/gsl/gsl.go @@ -15,6 +15,7 @@ package gsl import ( "errors" + "strings" "sync" "unsafe" @@ -50,6 +51,11 @@ var ( // unnecessary allocations. type SimpleSublist = GenericSublist[struct{}] +// NewSimpleSublist will create a simple sublist. 
+func NewSimpleSublist() *SimpleSublist { + return &GenericSublist[struct{}]{root: newLevel[struct{}]()} +} + // A GenericSublist stores and efficiently retrieves subscriptions. type GenericSublist[T comparable] struct { sync.RWMutex @@ -87,24 +93,13 @@ func NewSublist[T comparable]() *GenericSublist[T] { // Insert adds a subscription into the sublist func (s *GenericSublist[T]) Insert(subject string, value T) error { - tsa := [32]string{} - tokens := tsa[:0] - start := 0 - for i := 0; i < len(subject); i++ { - if subject[i] == btsep { - tokens = append(tokens, subject[start:i]) - start = i + 1 - } - } - tokens = append(tokens, subject[start:]) - s.Lock() var sfwc bool var n *node[T] l := s.root - for _, t := range tokens { + for t := range strings.SplitSeq(subject, tsep) { lt := len(t) if lt == 0 || sfwc { s.Unlock() @@ -312,17 +307,6 @@ type lnt[T comparable] struct { // Raw low level remove, can do batches with lock held outside. func (s *GenericSublist[T]) remove(subject string, value T, shouldLock bool) error { - tsa := [32]string{} - tokens := tsa[:0] - start := 0 - for i := 0; i < len(subject); i++ { - if subject[i] == btsep { - tokens = append(tokens, subject[start:i]) - start = i + 1 - } - } - tokens = append(tokens, subject[start:]) - if shouldLock { s.Lock() defer s.Unlock() @@ -336,7 +320,7 @@ func (s *GenericSublist[T]) remove(subject string, value T, shouldLock bool) err var lnts [32]lnt[T] levels := lnts[:0] - for _, t := range tokens { + for t := range strings.SplitSeq(subject, tsep) { lt := len(t) if lt == 0 || sfwc { return ErrInvalidSubject diff --git a/vendor/github.com/nats-io/nats-server/v2/server/jetstream.go b/vendor/github.com/nats-io/nats-server/v2/server/jetstream.go index 0aa2e527aaf..78763f3ece2 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/jetstream.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/jetstream.go @@ -194,6 +194,11 @@ func (s *Server) EnableJetStream(config *JetStreamConfig) error { } 
s.Noticef("Starting JetStream") + start := time.Now() + defer func() { + s.Noticef("Took %s to start JetStream", time.Since(start)) + }() + if config == nil || config.MaxMemory <= 0 || config.MaxStore <= 0 { var storeDir, domain, uniqueTag string var maxStore, maxMem int64 @@ -686,6 +691,11 @@ func (s *Server) DisableJetStream() error { } func (s *Server) enableJetStreamAccounts() error { + // Reuse the same task workers across all accounts, so that we don't explode + // with a large number of goroutines on multi-account systems. + tq := parallelTaskQueue(len(dios)) + defer close(tq) + // If we have no configured accounts setup then setup imports on global account. if s.globalAccountOnly() { gacc := s.GlobalAccount() @@ -694,10 +704,10 @@ func (s *Server) enableJetStreamAccounts() error { gacc.jsLimits = defaultJSAccountTiers } gacc.mu.Unlock() - if err := s.configJetStream(gacc); err != nil { + if err := s.configJetStream(gacc, tq); err != nil { return err } - } else if err := s.configAllJetStreamAccounts(); err != nil { + } else if err := s.configAllJetStreamAccounts(tq); err != nil { return fmt.Errorf("Error enabling jetstream on configured accounts: %v", err) } return nil @@ -761,7 +771,7 @@ func (a *Account) enableJetStreamInfoServiceImportOnly() error { return a.enableAllJetStreamServiceImportsAndMappings() } -func (s *Server) configJetStream(acc *Account) error { +func (s *Server) configJetStream(acc *Account, tq chan<- func()) error { if acc == nil { return nil } @@ -778,7 +788,7 @@ func (s *Server) configJetStream(acc *Account) error { return err } } else { - if err := acc.EnableJetStream(jsLimits); err != nil { + if err := acc.EnableJetStream(jsLimits, tq); err != nil { return err } if s.gateway.enabled { @@ -799,7 +809,7 @@ func (s *Server) configJetStream(acc *Account) error { } // configAllJetStreamAccounts walk all configured accounts and turn on jetstream if requested. 
-func (s *Server) configAllJetStreamAccounts() error { +func (s *Server) configAllJetStreamAccounts(tq chan<- func()) error { // Check to see if system account has been enabled. We could arrive here via reload and // a non-default system account. s.checkJetStreamExports() @@ -839,7 +849,7 @@ func (s *Server) configAllJetStreamAccounts() error { // Process any jetstream enabled accounts here. These will be accounts we are // already aware of at startup etc. for _, acc := range jsAccounts { - if err := s.configJetStream(acc); err != nil { + if err := s.configJetStream(acc, tq); err != nil { return err } } @@ -852,7 +862,7 @@ func (s *Server) configAllJetStreamAccounts() error { // Only load up ones not already loaded since they are processed above. if _, ok := accounts.Load(accName); !ok { if acc, err := s.lookupAccount(accName); err != nil && acc != nil { - if err := s.configJetStream(acc); err != nil { + if err := s.configJetStream(acc, tq); err != nil { return err } } @@ -1013,11 +1023,11 @@ func (s *Server) shutdownJetStream() { js.accounts = nil var qch chan struct{} - + var stopped chan struct{} if cc := js.cluster; cc != nil { if cc.qch != nil { - qch = cc.qch - cc.qch = nil + qch, stopped = cc.qch, cc.stopped + cc.qch, cc.stopped = nil, nil } js.stopUpdatesSub() if cc.c != nil { @@ -1034,14 +1044,11 @@ func (s *Server) shutdownJetStream() { // We will wait for a bit for it to close. // Do this without the lock. 
if qch != nil { + close(qch) // Must be close() to signal *all* listeners select { - case qch <- struct{}{}: - select { - case <-qch: - case <-time.After(2 * time.Second): - s.Warnf("Did not receive signal for successful shutdown of cluster routine") - } - default: + case <-stopped: + case <-time.After(10 * time.Second): + s.Warnf("Did not receive signal for successful shutdown of cluster routine") } } } @@ -1100,7 +1107,7 @@ func (a *Account) assignJetStreamLimits(limits map[string]JetStreamAccountLimits // EnableJetStream will enable JetStream on this account with the defined limits. // This is a helper for JetStreamEnableAccount. -func (a *Account) EnableJetStream(limits map[string]JetStreamAccountLimits) error { +func (a *Account) EnableJetStream(limits map[string]JetStreamAccountLimits, tq chan<- func()) error { a.mu.RLock() s := a.srv a.mu.RUnlock() @@ -1211,7 +1218,7 @@ func (a *Account) EnableJetStream(limits map[string]JetStreamAccountLimits) erro tdir := filepath.Join(jsa.storeDir, tmplsDir) if stat, err := os.Stat(tdir); err == nil && stat.IsDir() { key := sha256.Sum256([]byte("templates")) - hh, err := highwayhash.New64(key[:]) + hh, err := highwayhash.NewDigest64(key[:]) if err != nil { return err } @@ -1235,7 +1242,8 @@ func (a *Account) EnableJetStream(limits map[string]JetStreamAccountLimits) erro } hh.Reset() hh.Write(buf) - checksum := hex.EncodeToString(hh.Sum(nil)) + var hb [highwayhash.Size64]byte + checksum := hex.EncodeToString(hh.Sum(hb[:0])) if checksum != string(sum) { s.Warnf(" StreamTemplate checksums do not match %q vs %q", sum, checksum) continue @@ -1253,33 +1261,142 @@ func (a *Account) EnableJetStream(limits map[string]JetStreamAccountLimits) erro } } - // Collect consumers, do after all streams. 
- type ce struct { - mset *stream - odir string - } - var consumers []*ce - - // Collect any interest policy streams to check for - // https://github.com/nats-io/nats-server/issues/3612 - var ipstreams []*stream - // Remember if we should be encrypted and what cipher we think we should use. encrypted := s.getOpts().JetStreamKey != _EMPTY_ - plaintext := true sc := s.getOpts().JetStreamCipher + doConsumers := func(mset *stream, odir string) { + ofis, _ := os.ReadDir(odir) + if len(ofis) > 0 { + s.Noticef(" Recovering %d consumers for stream - '%s > %s'", len(ofis), mset.accName(), mset.name()) + } + for _, ofi := range ofis { + metafile := filepath.Join(odir, ofi.Name(), JetStreamMetaFile) + metasum := filepath.Join(odir, ofi.Name(), JetStreamMetaFileSum) + if _, err := os.Stat(metafile); os.IsNotExist(err) { + s.Warnf(" Missing consumer metafile %q", metafile) + continue + } + buf, err := os.ReadFile(metafile) + if err != nil { + s.Warnf(" Error reading consumer metafile %q: %v", metafile, err) + continue + } + if _, err := os.Stat(metasum); os.IsNotExist(err) { + s.Warnf(" Missing consumer checksum for %q", metasum) + continue + } + + // Check if we are encrypted. + if key, err := os.ReadFile(filepath.Join(odir, ofi.Name(), JetStreamMetaFileKey)); err == nil { + s.Debugf(" Consumer metafile is encrypted, reading encrypted keyfile") + // Decode the buffer before proceeding. 
+ ctxName := mset.name() + tsep + ofi.Name() + nbuf, _, err := s.decryptMeta(sc, key, buf, a.Name, ctxName) + if err != nil { + s.Warnf(" Error decrypting our consumer metafile: %v", err) + continue + } + buf = nbuf + } + + var cfg FileConsumerInfo + decoder := json.NewDecoder(bytes.NewReader(buf)) + decoder.DisallowUnknownFields() + strictErr := decoder.Decode(&cfg) + if strictErr != nil { + cfg = FileConsumerInfo{} + if err := json.Unmarshal(buf, &cfg); err != nil { + s.Warnf(" Error unmarshalling consumer metafile %q: %v", metafile, err) + continue + } + } + if supported := supportsRequiredApiLevel(cfg.Metadata); !supported || strictErr != nil { + var offlineReason string + if !supported { + apiLevel := getRequiredApiLevel(cfg.Metadata) + if strictErr != nil { + offlineReason = fmt.Sprintf("unsupported - config error: %s", strings.TrimPrefix(strictErr.Error(), "json: ")) + } else { + offlineReason = fmt.Sprintf("unsupported - required API level: %s, current API level: %d", apiLevel, JSApiLevel) + } + s.Warnf(" Detected unsupported consumer '%s > %s > %s': %s", a.Name, mset.name(), cfg.Name, offlineReason) + } else { + offlineReason = fmt.Sprintf("decoding error: %v", strictErr) + s.Warnf(" Error unmarshalling consumer metafile %q: %v", metafile, strictErr) + } + singleServerMode := !s.JetStreamIsClustered() && s.standAloneMode() + if singleServerMode { + if !mset.closed.Load() { + s.Warnf(" Stopping unsupported stream '%s > %s'", a.Name, mset.name()) + mset.mu.Lock() + mset.offlineReason = fmt.Sprintf("stopped - unsupported consumer %q", cfg.Name) + mset.mu.Unlock() + mset.stop(false, false) + } + + // Fake a consumer, so we can respond to API requests as single-server. 
+ o := &consumer{ + mset: mset, + js: s.getJetStream(), + acc: a, + srv: s, + cfg: cfg.ConsumerConfig, + active: false, + stream: mset.name(), + name: cfg.Name, + dseq: 1, + sseq: 1, + created: time.Now().UTC(), + closed: true, + offlineReason: offlineReason, + } + if !cfg.Created.IsZero() { + o.created = cfg.Created + } + + mset.mu.Lock() + mset.setConsumer(o) + mset.mu.Unlock() + } + continue + } + + isEphemeral := !isDurableConsumer(&cfg.ConsumerConfig) + if isEphemeral { + // This is an ephemeral consumer and this could fail on restart until + // the consumer can reconnect. We will create it as a durable and switch it. + cfg.ConsumerConfig.Durable = ofi.Name() + } + obs, err := mset.addConsumerWithAssignment(&cfg.ConsumerConfig, _EMPTY_, nil, true, ActionCreateOrUpdate, false) + if err != nil { + s.Warnf(" Error adding consumer %q: %v", cfg.Name, err) + continue + } + if isEphemeral { + obs.switchToEphemeral() + } + if !cfg.Created.IsZero() { + obs.setCreatedTime(cfg.Created) + } + if err != nil { + s.Warnf(" Error restoring consumer %q state: %v", cfg.Name, err) + } + } + } + // Now recover the streams. fis, _ := os.ReadDir(sdir) - for _, fi := range fis { + doStream := func(fi os.DirEntry) error { + plaintext := true mdir := filepath.Join(sdir, fi.Name()) // Check for partially deleted streams. They are marked with "." prefix. 
if strings.HasPrefix(fi.Name(), tsep) { go os.RemoveAll(mdir) - continue + return nil } key := sha256.Sum256([]byte(fi.Name())) - hh, err := highwayhash.New64(key[:]) + hh, err := highwayhash.NewDigest64(key[:]) if err != nil { return err } @@ -1287,27 +1404,28 @@ func (a *Account) EnableJetStream(limits map[string]JetStreamAccountLimits) erro metasum := filepath.Join(mdir, JetStreamMetaFileSum) if _, err := os.Stat(metafile); os.IsNotExist(err) { s.Warnf(" Missing stream metafile for %q", metafile) - continue + return nil } buf, err := os.ReadFile(metafile) if err != nil { s.Warnf(" Error reading metafile %q: %v", metafile, err) - continue + return nil } if _, err := os.Stat(metasum); os.IsNotExist(err) { s.Warnf(" Missing stream checksum file %q", metasum) - continue + return nil } sum, err := os.ReadFile(metasum) if err != nil { s.Warnf(" Error reading Stream metafile checksum %q: %v", metasum, err) - continue + return nil } hh.Write(buf) - checksum := hex.EncodeToString(hh.Sum(nil)) + var hb [highwayhash.Size64]byte + checksum := hex.EncodeToString(hh.Sum(hb[:0])) if checksum != string(sum) { s.Warnf(" Stream metafile %q: checksums do not match %q vs %q", metafile, sum, checksum) - continue + return nil } // Track if we are converting ciphers. @@ -1320,14 +1438,14 @@ func (a *Account) EnableJetStream(limits map[string]JetStreamAccountLimits) erro s.Debugf(" Stream metafile is encrypted, reading encrypted keyfile") if len(keyBuf) < minMetaKeySize { s.Warnf(" Bad stream encryption key length of %d", len(keyBuf)) - continue + return nil } // Decode the buffer before proceeding. 
var nbuf []byte nbuf, convertingCiphers, err = s.decryptMeta(sc, keyBuf, buf, a.Name, fi.Name()) if err != nil { s.Warnf(" Error decrypting our stream metafile: %v", err) - continue + return nil } buf = nbuf plaintext = false @@ -1341,7 +1459,7 @@ func (a *Account) EnableJetStream(limits map[string]JetStreamAccountLimits) erro cfg = FileStreamInfo{} if err := json.Unmarshal(buf, &cfg); err != nil { s.Warnf(" Error unmarshalling stream metafile %q: %v", metafile, err) - continue + return nil } } if supported := supportsRequiredApiLevel(cfg.Metadata); !supported || strictErr != nil { @@ -1384,13 +1502,16 @@ func (a *Account) EnableJetStream(limits map[string]JetStreamAccountLimits) erro // Now do the consumers. odir := filepath.Join(sdir, fi.Name(), consumerDir) - consumers = append(consumers, &ce{mset, odir}) + doConsumers(mset, odir) } - continue + return nil } if cfg.Template != _EMPTY_ { - if err := jsa.addStreamNameToTemplate(cfg.Template, cfg.Name); err != nil { + jsa.mu.Lock() + err := jsa.addStreamNameToTemplate(cfg.Template, cfg.Name) + jsa.mu.Unlock() + if err != nil { s.Warnf(" Error adding stream %q to template %q: %v", cfg.Name, cfg.Template, err) } } @@ -1415,7 +1536,7 @@ func (a *Account) EnableJetStream(limits map[string]JetStreamAccountLimits) erro } } if hadSubjErr { - continue + return nil } // The other possible bug is assigning subjects to mirrors, so check for that and patch as well. @@ -1449,7 +1570,7 @@ func (a *Account) EnableJetStream(limits map[string]JetStreamAccountLimits) erro s.Warnf(" Error replacing meta keyfile for stream %q: %v", cfg.Name, err) } } - continue + return nil } if !cfg.Created.IsZero() { mset.setCreatedTime(cfg.Created) @@ -1514,146 +1635,41 @@ func (a *Account) EnableJetStream(limits map[string]JetStreamAccountLimits) erro s.Noticef(" Restored %s messages for stream '%s > %s' in %v", comma(int64(state.Msgs)), mset.accName(), mset.name(), time.Since(rt).Round(time.Millisecond)) + // Now do the consumers. 
+ odir := filepath.Join(sdir, fi.Name(), consumerDir) + doConsumers(mset, odir) + // Collect to check for dangling messages. // TODO(dlc) - Can be removed eventually. if cfg.StreamConfig.Retention == InterestPolicy { - ipstreams = append(ipstreams, mset) + mset.checkForOrphanMsgs() + mset.checkConsumerReplication() } - // Now do the consumers. - odir := filepath.Join(sdir, fi.Name(), consumerDir) - consumers = append(consumers, &ce{mset, odir}) + return nil } - for _, e := range consumers { - ofis, _ := os.ReadDir(e.odir) - if len(ofis) > 0 { - s.Noticef(" Recovering %d consumers for stream - '%s > %s'", len(ofis), e.mset.accName(), e.mset.name()) + if tq != nil { + // If a parallelTaskQueue was provided then use that for concurrency. + var wg sync.WaitGroup + wg.Add(len(fis)) + for _, fi := range fis { + tq <- func() { + doStream(fi) + wg.Done() + } } - for _, ofi := range ofis { - metafile := filepath.Join(e.odir, ofi.Name(), JetStreamMetaFile) - metasum := filepath.Join(e.odir, ofi.Name(), JetStreamMetaFileSum) - if _, err := os.Stat(metafile); os.IsNotExist(err) { - s.Warnf(" Missing consumer metafile %q", metafile) - continue - } - buf, err := os.ReadFile(metafile) - if err != nil { - s.Warnf(" Error reading consumer metafile %q: %v", metafile, err) - continue - } - if _, err := os.Stat(metasum); os.IsNotExist(err) { - s.Warnf(" Missing consumer checksum for %q", metasum) - continue - } - - // Check if we are encrypted. - if key, err := os.ReadFile(filepath.Join(e.odir, ofi.Name(), JetStreamMetaFileKey)); err == nil { - s.Debugf(" Consumer metafile is encrypted, reading encrypted keyfile") - // Decode the buffer before proceeding. 
- ctxName := e.mset.name() + tsep + ofi.Name() - nbuf, _, err := s.decryptMeta(sc, key, buf, a.Name, ctxName) - if err != nil { - s.Warnf(" Error decrypting our consumer metafile: %v", err) - continue - } - buf = nbuf - } - - var cfg FileConsumerInfo - decoder := json.NewDecoder(bytes.NewReader(buf)) - decoder.DisallowUnknownFields() - strictErr := decoder.Decode(&cfg) - if strictErr != nil { - cfg = FileConsumerInfo{} - if err := json.Unmarshal(buf, &cfg); err != nil { - s.Warnf(" Error unmarshalling consumer metafile %q: %v", metafile, err) - continue - } - } - if supported := supportsRequiredApiLevel(cfg.Metadata); !supported || strictErr != nil { - var offlineReason string - if !supported { - apiLevel := getRequiredApiLevel(cfg.Metadata) - if strictErr != nil { - offlineReason = fmt.Sprintf("unsupported - config error: %s", strings.TrimPrefix(strictErr.Error(), "json: ")) - } else { - offlineReason = fmt.Sprintf("unsupported - required API level: %s, current API level: %d", apiLevel, JSApiLevel) - } - s.Warnf(" Detected unsupported consumer '%s > %s > %s': %s", a.Name, e.mset.name(), cfg.Name, offlineReason) - } else { - offlineReason = fmt.Sprintf("decoding error: %v", strictErr) - s.Warnf(" Error unmarshalling consumer metafile %q: %v", metafile, strictErr) - } - singleServerMode := !s.JetStreamIsClustered() && s.standAloneMode() - if singleServerMode { - if !e.mset.closed.Load() { - s.Warnf(" Stopping unsupported stream '%s > %s'", a.Name, e.mset.name()) - e.mset.mu.Lock() - e.mset.offlineReason = fmt.Sprintf("stopped - unsupported consumer %q", cfg.Name) - e.mset.mu.Unlock() - e.mset.stop(false, false) - } - - // Fake a consumer, so we can respond to API requests as single-server. 
- o := &consumer{ - mset: e.mset, - js: s.getJetStream(), - acc: a, - srv: s, - cfg: cfg.ConsumerConfig, - active: false, - stream: e.mset.name(), - name: cfg.Name, - dseq: 1, - sseq: 1, - created: time.Now().UTC(), - closed: true, - offlineReason: offlineReason, - } - if !cfg.Created.IsZero() { - o.created = cfg.Created - } - - e.mset.mu.Lock() - e.mset.setConsumer(o) - e.mset.mu.Unlock() - } - continue - } - - isEphemeral := !isDurableConsumer(&cfg.ConsumerConfig) - if isEphemeral { - // This is an ephemeral consumer and this could fail on restart until - // the consumer can reconnect. We will create it as a durable and switch it. - cfg.ConsumerConfig.Durable = ofi.Name() - } - obs, err := e.mset.addConsumerWithAssignment(&cfg.ConsumerConfig, _EMPTY_, nil, true, ActionCreateOrUpdate, false) - if err != nil { - s.Warnf(" Error adding consumer %q: %v", cfg.Name, err) - continue - } - if isEphemeral { - obs.switchToEphemeral() - } - if !cfg.Created.IsZero() { - obs.setCreatedTime(cfg.Created) - } - if err != nil { - s.Warnf(" Error restoring consumer %q state: %v", cfg.Name, err) - } + wg.Wait() + } else { + // No parallelTaskQueue provided, do inline as before. + for _, fi := range fis { + doStream(fi) } } // Make sure to cleanup any old remaining snapshots. os.RemoveAll(filepath.Join(jsa.storeDir, snapsDir)) - // Check interest policy streams for auto cleanup. 
- for _, mset := range ipstreams { - mset.checkForOrphanMsgs() - mset.checkConsumerReplication() - } - s.Debugf("JetStream state for account %q recovered", a.Name) return nil diff --git a/vendor/github.com/nats-io/nats-server/v2/server/jetstream_api.go b/vendor/github.com/nats-io/nats-server/v2/server/jetstream_api.go index 31fcf056762..1f42bf0c208 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/jetstream_api.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/jetstream_api.go @@ -2689,8 +2689,17 @@ func (s *Server) jsLeaderServerRemoveRequest(sub *subscription, c *client, _ *Ac return } + js.mu.Lock() + defer js.mu.Unlock() + + // Another peer-remove is already in progress, don't allow multiple concurrent changes. + if cc.peerRemoveReply != nil { + resp.Error = NewJSClusterServerMemberChangeInflightError() + s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) + return + } + var found string - js.mu.RLock() for _, p := range meta.Peers() { // If Peer is specified, it takes precedence if req.Peer != _EMPTY_ { @@ -2706,7 +2715,6 @@ func (s *Server) jsLeaderServerRemoveRequest(sub *subscription, c *client, _ *Ac break } } - js.mu.RUnlock() if found == _EMPTY_ { resp.Error = NewJSClusterServerNotMemberError() @@ -2714,13 +2722,21 @@ func (s *Server) jsLeaderServerRemoveRequest(sub *subscription, c *client, _ *Ac return } - // So we have a valid peer. 
- js.mu.Lock() - meta.ProposeRemovePeer(found) - js.mu.Unlock() + if err := meta.ProposeRemovePeer(found); err != nil { + if err == errMembershipChange { + resp.Error = NewJSClusterServerMemberChangeInflightError() + } else { + resp.Error = NewJSRaftGeneralError(err) + } + s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) + return + } - resp.Success = true - s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) + if cc.peerRemoveReply == nil { + cc.peerRemoveReply = make(map[string]peerRemoveInfo, 1) + } + // Only copy the request, the subject and reply are already copied. + cc.peerRemoveReply[found] = peerRemoveInfo{ci: ci, subject: subject, reply: reply, request: string(msg)} } func (s *Server) peerSetToNames(ps []string) []string { @@ -3114,11 +3130,11 @@ func (s *Server) jsLeaderAccountPurgeRequest(sub *subscription, c *client, _ *Ac for _, osa := range streams { for _, oca := range osa.consumers { oca.deleted = true - ca := &consumerAssignment{Group: oca.Group, Stream: oca.Stream, Name: oca.Name, Config: oca.Config, Subject: subject, Client: oca.Client} + ca := &consumerAssignment{Group: oca.Group, Stream: oca.Stream, Name: oca.Name, Config: oca.Config, Subject: subject, Client: oca.Client, Created: oca.Created} meta.Propose(encodeDeleteConsumerAssignment(ca)) nc++ } - sa := &streamAssignment{Group: osa.Group, Config: osa.Config, Subject: subject, Client: osa.Client} + sa := &streamAssignment{Group: osa.Group, Config: osa.Config, Subject: subject, Client: osa.Client, Created: osa.Created} meta.Propose(encodeDeleteStreamAssignment(sa)) ns++ } diff --git a/vendor/github.com/nats-io/nats-server/v2/server/jetstream_batching.go b/vendor/github.com/nats-io/nats-server/v2/server/jetstream_batching.go index fc97f1f5453..a8793e54629 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/jetstream_batching.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/jetstream_batching.go @@ -611,41 +611,46 @@ 
func checkMsgHeadersPreClusteredProposal( // We need to deny here otherwise we'd need to bump CLFS, and it could succeed on some // peers and not others depending on consumer ack state (if interest policy). // So we deny here, if we allow that means we know it would succeed on every peer. - if discard == DiscardNew && (maxMsgs > 0 || maxBytes > 0) { - // Error if over DiscardNew per subject threshold. - if discardNewPer { - totalMsgsForSubject := i.ops - if i, ok = mset.inflight[subject]; ok { - totalMsgsForSubject += i.ops + if discard == DiscardNew { + if maxMsgs > 0 || maxBytes > 0 { + // Track usual max msgs/bytes thresholds for DiscardNew. + var state StreamState + mset.store.FastState(&state) + + totalMsgs := state.Msgs + totalBytes := state.Bytes + for _, i = range mset.inflight { + totalMsgs += i.ops + totalBytes += i.bytes } - if maxMsgsPer > 0 && totalMsgsForSubject > uint64(maxMsgsPer) { - err = ErrMaxMsgsPerSubject + for _, i = range diff.inflight { + totalMsgs += i.ops + totalBytes += i.bytes + } + + if maxMsgs > 0 && totalMsgs > uint64(maxMsgs) { + err = ErrMaxMsgs + } else if maxBytes > 0 && totalBytes > uint64(maxBytes) { + err = ErrMaxBytes + } + if err != nil { return hdr, msg, 0, NewJSStreamStoreFailedError(err, Unless(err)), err } } - // Track usual max msgs/bytes thresholds for DiscardNew. - var state StreamState - mset.store.FastState(&state) - - totalMsgs := state.Msgs - totalBytes := state.Bytes - for _, i = range mset.inflight { - totalMsgs += i.ops - totalBytes += i.bytes - } - for _, i = range diff.inflight { - totalMsgs += i.ops - totalBytes += i.bytes - } - - if maxMsgs > 0 && totalMsgs > uint64(maxMsgs) { - err = ErrMaxMsgs - } else if maxBytes > 0 && totalBytes > uint64(maxBytes) { - err = ErrMaxBytes - } - if err != nil { - return hdr, msg, 0, NewJSStreamStoreFailedError(err, Unless(err)), err + // Similarly, check DiscardNew per-subject threshold to not need to bump CLFS. 
+ if discardNewPer && maxMsgsPer > 0 { + // Get the current total for this subject. + totalMsgsForSubject := mset.store.SubjectsTotals(subject)[subject] + // Add inflight count in this batch and for this stream. + totalMsgsForSubject += i.ops + if i, ok = mset.inflight[subject]; ok { + totalMsgsForSubject += i.ops + } + if totalMsgsForSubject > uint64(maxMsgsPer) { + err = ErrMaxMsgsPerSubject + return hdr, msg, 0, NewJSStreamStoreFailedError(err, Unless(err)), err + } } } diff --git a/vendor/github.com/nats-io/nats-server/v2/server/jetstream_cluster.go b/vendor/github.com/nats-io/nats-server/v2/server/jetstream_cluster.go index 1cef7348847..a630a4cf02a 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/jetstream_cluster.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/jetstream_cluster.go @@ -51,6 +51,9 @@ type jetStreamCluster struct { // concurrent requests for same account and stream we need to let it process to get // a response but they need to be same group, peers etc. and sync subjects. inflight map[string]map[string]*inflightInfo + // Holds a map of a peer ID to the reply subject, to only respond after gaining + // quorum on the peer-remove action. + peerRemoveReply map[string]peerRemoveInfo // Signals meta-leader should check the stream assignments. streamsCheck bool // Server. @@ -70,6 +73,11 @@ type jetStreamCluster struct { peerStreamCancelMove *subscription // To pop out the monitorCluster before the raft layer. qch chan struct{} + // To notify others that monitorCluster has actually stopped. + stopped chan struct{} + // Track last meta snapshot time and duration for monitoring. + lastMetaSnapTime int64 // Unix nanoseconds + lastMetaSnapDuration int64 // Duration in nanoseconds } // Used to track inflight stream add requests to properly re-use same group and sync subject. @@ -79,6 +87,14 @@ type inflightInfo struct { cfg *StreamConfig } +// Used to track inflight peer-remove info to respond 'success' after quorum. 
+type peerRemoveInfo struct { + ci *ClientInfo + subject string + reply string + request string +} + // Used to guide placement of streams and meta controllers in clustered JetStream. type Placement struct { Cluster string `json:"cluster,omitempty"` @@ -620,8 +636,11 @@ func (js *jetStream) isStreamHealthy(acc *Account, sa *streamAssignment) error { } msetNode := mset.raftNode() + mset.cfgMu.RLock() + replicas := mset.cfg.Replicas + mset.cfgMu.RUnlock() switch { - case mset.cfg.Replicas <= 1: + case replicas <= 1: return nil // No further checks for R=1 streams case node == nil: @@ -638,12 +657,12 @@ func (js *jetStream) isStreamHealthy(acc *Account, sa *streamAssignment) error { case !mset.isMonitorRunning(): return errors.New("monitor goroutine not running") - case !node.Healthy(): - return errors.New("group node unhealthy") - case mset.isCatchingUp(): return errors.New("stream catching up") + case !node.Healthy(): + return errors.New("group node unhealthy") + default: return nil } @@ -896,6 +915,9 @@ func (js *jetStream) setupMetaGroup() error { } if cfg.Observer { s.Noticef("Turning JetStream metadata controller Observer Mode on") + s.Noticef("In cases where the JetStream domain is not intended to be extended through a SYS account leaf node connection") + s.Noticef("and waiting for leader election until first contact is not acceptable,") + s.Noticef(`manually disable Observer Mode by setting the JetStream Option "extension_hint: %s"`, jsNoExtend) } } else { s.Noticef("JetStream cluster recovering state") @@ -909,7 +931,7 @@ func (js *jetStream) setupMetaGroup() error { cfg.Observer = false case extUndetermined: s.Noticef("Turning JetStream metadata controller Observer Mode on - no previous contact") - s.Noticef("In cases where JetStream will not be extended") + s.Noticef("In cases where the JetStream domain is not intended to be extended through a SYS account leaf node connection") s.Noticef("and waiting for leader election until first contact is not 
acceptable,") s.Noticef(`manually disable Observer Mode by setting the JetStream Option "extension_hint: %s"`, jsNoExtend) } @@ -948,6 +970,7 @@ func (js *jetStream) setupMetaGroup() error { s: s, c: c, qch: make(chan struct{}), + stopped: make(chan struct{}), } atomic.StoreInt32(&js.clustered, 1) c.registerWithAccount(sysAcc) @@ -1184,6 +1207,16 @@ func (js *jetStream) clusterQuitC() chan struct{} { return nil } +// Return the cluster stopped chan. +func (js *jetStream) clusterStoppedC() chan struct{} { + js.mu.RLock() + defer js.mu.RUnlock() + if js.cluster != nil { + return js.cluster.stopped + } + return nil +} + // Mark that the meta layer is recovering. func (js *jetStream) setMetaRecovering() { js.mu.Lock() @@ -1217,6 +1250,46 @@ type recoveryUpdates struct { updateConsumers map[string]map[string]*consumerAssignment } +func (ru *recoveryUpdates) removeStream(sa *streamAssignment) { + key := sa.recoveryKey() + ru.removeStreams[key] = sa + delete(ru.addStreams, key) + delete(ru.updateStreams, key) + delete(ru.updateConsumers, key) + delete(ru.removeConsumers, key) +} + +func (ru *recoveryUpdates) addStream(sa *streamAssignment) { + key := sa.recoveryKey() + ru.addStreams[key] = sa +} + +func (ru *recoveryUpdates) updateStream(sa *streamAssignment) { + key := sa.recoveryKey() + ru.updateStreams[key] = sa +} + +func (ru *recoveryUpdates) removeConsumer(ca *consumerAssignment) { + key := ca.recoveryKey() + skey := ca.streamRecoveryKey() + if _, ok := ru.removeConsumers[skey]; !ok { + ru.removeConsumers[skey] = map[string]*consumerAssignment{} + } + ru.removeConsumers[skey][key] = ca + if consumers, ok := ru.updateConsumers[skey]; ok { + delete(consumers, key) + } +} + +func (ru *recoveryUpdates) addOrUpdateConsumer(ca *consumerAssignment) { + key := ca.recoveryKey() + skey := ca.streamRecoveryKey() + if _, ok := ru.updateConsumers[skey]; !ok { + ru.updateConsumers[skey] = map[string]*consumerAssignment{} + } + ru.updateConsumers[skey][key] = ca +} + // Called 
after recovery of the cluster on startup to check for any orphans. // Streams and consumers are recovered from disk, and the meta layer's mappings // should clean them up, but under crash scenarios there could be orphans. @@ -1294,9 +1367,10 @@ func (js *jetStream) checkForOrphans() { func (js *jetStream) monitorCluster() { s, n := js.server(), js.getMetaGroup() - qch, rqch, lch, aq := js.clusterQuitC(), n.QuitC(), n.LeadChangeC(), n.ApplyQ() + qch, stopped, rqch, lch, aq := js.clusterQuitC(), js.clusterStoppedC(), n.QuitC(), n.LeadChangeC(), n.ApplyQ() defer s.grWG.Done() + defer close(stopped) s.Debugf("Starting metadata monitor") defer s.Debugf("Exiting metadata monitor") @@ -1339,15 +1413,24 @@ func (js *jetStream) monitorCluster() { // Set to true to start. js.setMetaRecovering() + recovering := true // Snapshotting function. - doSnapshot := func() { + doSnapshot := func(force bool) { // Suppress during recovery. - if js.isMetaRecovering() { + if recovering { return } - // For the meta layer we want to snapshot when asked if we need one or have any entries that we can compact. - if ne, _ := n.Size(); ne > 0 || n.NeedSnapshot() { + // Look up what the threshold is for compaction. Re-reading from config here as it is reloadable. + js.srv.optsMu.RLock() + ethresh := js.srv.opts.JetStreamMetaCompact + szthresh := js.srv.opts.JetStreamMetaCompactSize + js.srv.optsMu.RUnlock() + // Work out our criteria for snapshotting. + byEntries, bySize := ethresh > 0, szthresh > 0 + byNeither := !byEntries && !bySize + // For the meta layer we want to snapshot when over the above threshold (which could be 0 by default). 
+ if ne, nsz := n.Size(); force || byNeither || (byEntries && ne > ethresh) || (bySize && nsz > szthresh) || n.NeedSnapshot() { snap, err := js.metaSnapshot() if err != nil { s.Warnf("Error generating JetStream cluster snapshot: %v", err) @@ -1359,13 +1442,7 @@ func (js *jetStream) monitorCluster() { } } - ru := &recoveryUpdates{ - removeStreams: make(map[string]*streamAssignment), - removeConsumers: make(map[string]map[string]*consumerAssignment), - addStreams: make(map[string]*streamAssignment), - updateStreams: make(map[string]*streamAssignment), - updateConsumers: make(map[string]map[string]*consumerAssignment), - } + var ru *recoveryUpdates // Make sure to cancel any pending checkForOrphans calls if the // monitor goroutine exits. @@ -1376,66 +1453,81 @@ func (js *jetStream) monitorCluster() { select { case <-s.quitCh: // Server shutting down, but we might receive this before qch, so try to snapshot. - doSnapshot() + doSnapshot(false) return case <-rqch: - // Clean signal from shutdown routine so do best effort attempt to snapshot meta layer. - doSnapshot() + // Raft node is closed, no use in trying to snapshot. return case <-qch: // Clean signal from shutdown routine so do best effort attempt to snapshot meta layer. - doSnapshot() - // Return the signal back since shutdown will be waiting. - close(qch) + doSnapshot(false) return case <-aq.ch: ces := aq.pop() for _, ce := range ces { + if recovering && ru == nil { + ru = &recoveryUpdates{ + removeStreams: make(map[string]*streamAssignment), + removeConsumers: make(map[string]map[string]*consumerAssignment), + addStreams: make(map[string]*streamAssignment), + updateStreams: make(map[string]*streamAssignment), + updateConsumers: make(map[string]map[string]*consumerAssignment), + } + } if ce == nil { - // Process any removes that are still valid after recovery. 
- for _, cas := range ru.removeConsumers { - for _, ca := range cas { - js.processConsumerRemoval(ca) + if ru != nil { + // Process any removes that are still valid after recovery. + for _, cas := range ru.removeConsumers { + for _, ca := range cas { + js.processConsumerRemoval(ca) + } } - } - for _, sa := range ru.removeStreams { - js.processStreamRemoval(sa) - } - // Process stream additions. - for _, sa := range ru.addStreams { - js.processStreamAssignment(sa) - } - // Process pending updates. - for _, sa := range ru.updateStreams { - js.processUpdateStreamAssignment(sa) - } - // Now consumers. - for _, cas := range ru.updateConsumers { - for _, ca := range cas { - js.processConsumerAssignment(ca) + for _, sa := range ru.removeStreams { + js.processStreamRemoval(sa) + } + // Process stream additions. + for _, sa := range ru.addStreams { + js.processStreamAssignment(sa) + } + // Process pending updates. + for _, sa := range ru.updateStreams { + js.processUpdateStreamAssignment(sa) + } + // Now consumers. + for _, cas := range ru.updateConsumers { + for _, ca := range cas { + js.processConsumerAssignment(ca) + } } } // Signals we have replayed all of our metadata. + wasMetaRecovering := js.isMetaRecovering() js.clearMetaRecovering() + recovering = false // Clear. ru = nil s.Debugf("Recovered JetStream cluster metadata") - oc = time.AfterFunc(30*time.Second, js.checkForOrphans) - // Do a health check here as well. - go checkHealth() + // Snapshot now so we start with freshly compacted log. + doSnapshot(true) + if wasMetaRecovering { + oc = time.AfterFunc(30*time.Second, js.checkForOrphans) + // Do a health check here as well. + go checkHealth() + } continue } - if didSnap, err := js.applyMetaEntries(ce.Entries, ru); err == nil { + if isRecovering, didSnap, err := js.applyMetaEntries(ce.Entries, ru); err == nil { var nb uint64 // Some entries can fail without an error when shutting down, don't move applied forward. 
if !js.isShuttingDown() { _, nb = n.Applied(ce.Index) } if js.hasPeerEntries(ce.Entries) || (didSnap && !isLeader) { - doSnapshot() + doSnapshot(true) } else if nb > compactSizeMin && time.Since(lastSnapTime) > minSnapDelta { - doSnapshot() + doSnapshot(false) } + recovering = isRecovering } else { s.Warnf("Error applying JetStream cluster entries: %v", err) } @@ -1450,11 +1542,11 @@ func (js *jetStream) monitorCluster() { s.sendInternalMsgLocked(serverStatsPingReqSubj, _EMPTY_, nil, nil) // Install a snapshot as we become leader. js.checkClusterSize() - doSnapshot() + doSnapshot(false) } case <-t.C: - doSnapshot() + doSnapshot(false) // Periodically check the cluster size. if n.Leader() { js.checkClusterSize() @@ -1608,19 +1700,27 @@ func (js *jetStream) metaSnapshot() ([]byte, error) { return nil, err } - // Track how long it took to compress the JSON + // Track how long it took to compress the JSON. cstart := time.Now() snap := s2.Encode(nil, b) cend := time.Since(cstart) + took := time.Since(start) - if took := time.Since(start); took > time.Second { + if took > time.Second { s.rateLimitFormatWarnf("Metalayer snapshot took %.3fs (streams: %d, consumers: %d, marshal: %.3fs, s2: %.3fs, uncompressed: %d, compressed: %d)", took.Seconds(), nsa, nca, mend.Seconds(), cend.Seconds(), len(b), len(snap)) } + + // Track in jsz monitoring as well. 
+ if cc != nil { + atomic.StoreInt64(&cc.lastMetaSnapTime, start.UnixNano()) + atomic.StoreInt64(&cc.lastMetaSnapDuration, int64(took)) + } + return snap, nil } -func (js *jetStream) applyMetaSnapshot(buf []byte, ru *recoveryUpdates, isRecovering bool) error { +func (js *jetStream) applyMetaSnapshot(buf []byte, ru *recoveryUpdates, isRecovering, startupRecovery bool) error { var wsas []writeableStreamAssignment if len(buf) > 0 { jse, err := s2.Decode(nil, buf) @@ -1705,25 +1805,32 @@ func (js *jetStream) applyMetaSnapshot(buf []byte, ru *recoveryUpdates, isRecove for _, sa := range saDel { js.setStreamAssignmentRecovering(sa) if isRecovering { - key := sa.recoveryKey() - ru.removeStreams[key] = sa - delete(ru.addStreams, key) - delete(ru.updateStreams, key) - delete(ru.updateConsumers, key) - delete(ru.removeConsumers, key) + ru.removeStream(sa) } else { js.processStreamRemoval(sa) } } // Now do add for the streams. Also add in all consumers. for _, sa := range saAdd { + consumers := sa.consumers js.setStreamAssignmentRecovering(sa) - js.processStreamAssignment(sa) + if isRecovering { + // Since we're recovering and storing up changes, we'll need to clear out these consumers. + // Some might be removed, and we'll recover those later, must not be able to remember them. + sa.consumers = nil + ru.addStream(sa) + } else { + js.processStreamAssignment(sa) + } // We can simply process the consumers. 
- for _, ca := range sa.consumers { + for _, ca := range consumers { js.setConsumerAssignmentRecovering(ca) - js.processConsumerAssignment(ca) + if isRecovering { + ru.addOrUpdateConsumer(ca) + } else { + js.processConsumerAssignment(ca) + } } } @@ -1732,10 +1839,7 @@ func (js *jetStream) applyMetaSnapshot(buf []byte, ru *recoveryUpdates, isRecove for _, sa := range saChk { js.setStreamAssignmentRecovering(sa) if isRecovering { - key := sa.recoveryKey() - ru.updateStreams[key] = sa - delete(ru.addStreams, key) - delete(ru.removeStreams, key) + ru.updateStream(sa) } else { js.processUpdateStreamAssignment(sa) } @@ -1745,15 +1849,7 @@ func (js *jetStream) applyMetaSnapshot(buf []byte, ru *recoveryUpdates, isRecove for _, ca := range caDel { js.setConsumerAssignmentRecovering(ca) if isRecovering { - key := ca.recoveryKey() - skey := ca.streamRecoveryKey() - if _, ok := ru.removeConsumers[skey]; !ok { - ru.removeConsumers[skey] = map[string]*consumerAssignment{} - } - ru.removeConsumers[skey][key] = ca - if consumers, ok := ru.updateConsumers[skey]; ok { - delete(consumers, key) - } + ru.removeConsumer(ca) } else { js.processConsumerRemoval(ca) } @@ -1761,15 +1857,7 @@ func (js *jetStream) applyMetaSnapshot(buf []byte, ru *recoveryUpdates, isRecove for _, ca := range caAdd { js.setConsumerAssignmentRecovering(ca) if isRecovering { - key := ca.recoveryKey() - skey := ca.streamRecoveryKey() - if consumers, ok := ru.removeConsumers[skey]; ok { - delete(consumers, key) - } - if _, ok := ru.updateConsumers[skey]; !ok { - ru.updateConsumers[skey] = map[string]*consumerAssignment{} - } - ru.updateConsumers[skey][key] = ca + ru.addOrUpdateConsumer(ca) } else { js.processConsumerAssignment(ca) } @@ -2010,20 +2098,59 @@ func (ca *consumerAssignment) recoveryKey() string { return ca.Client.serviceAccount() + ksep + ca.Stream + ksep + ca.Name } -func (js *jetStream) applyMetaEntries(entries []*Entry, ru *recoveryUpdates) (bool, error) { +func (js *jetStream) 
applyMetaEntries(entries []*Entry, ru *recoveryUpdates) (bool, bool, error) { var didSnap bool - isRecovering := js.isMetaRecovering() + isRecovering := ru != nil + startupRecovery := js.isMetaRecovering() for _, e := range entries { + // If we received a lower-level catchup entry, mark that we're recovering. + // We can optimize by staging all meta operations until we're caught up. + // At that point we can apply the diff in one go. + if e.Type == EntryCatchup { + isRecovering = true + // A catchup entry only contains this, so we can exit now and have the + // recoveryUpdates struct be populated for the next invocation of applyMetaEntries. + return isRecovering, didSnap, nil + } + if e.Type == EntrySnapshot { - js.applyMetaSnapshot(e.Data, ru, isRecovering) + js.applyMetaSnapshot(e.Data, ru, isRecovering, startupRecovery) didSnap = true } else if e.Type == EntryRemovePeer { - if !isRecovering { - js.processRemovePeer(string(e.Data)) + if !js.isMetaRecovering() { + peer := string(e.Data) + js.processRemovePeer(peer) + + // The meta leader can now respond to the peer-removal, + // since a quorum of nodes has this in their log. 
+ s := js.srv + if s.JetStreamIsLeader() { + var ( + info peerRemoveInfo + ok bool + ) + js.mu.Lock() + if cc := js.cluster; cc != nil && cc.peerRemoveReply != nil { + if info, ok = cc.peerRemoveReply[peer]; ok { + delete(cc.peerRemoveReply, peer) + } + if len(cc.peerRemoveReply) == 0 { + cc.peerRemoveReply = nil + } + } + js.mu.Unlock() + + if info.reply != _EMPTY_ { + sysAcc := s.SystemAccount() + var resp = JSApiMetaServerRemoveResponse{ApiResponse: ApiResponse{Type: JSApiMetaServerRemoveResponseType}} + resp.Success = true + s.sendAPIResponse(info.ci, sysAcc, info.subject, info.reply, info.request, s.jsonResponse(&resp)) + } + } } } else if e.Type == EntryAddPeer { - if !isRecovering { + if !js.isMetaRecovering() { js.processAddPeer(string(e.Data)) } } else { @@ -2033,13 +2160,11 @@ func (js *jetStream) applyMetaEntries(entries []*Entry, ru *recoveryUpdates) (bo sa, err := decodeStreamAssignment(js.srv, buf[1:]) if err != nil { js.srv.Errorf("JetStream cluster failed to decode stream assignment: %q", buf[1:]) - return didSnap, err + return isRecovering, didSnap, err } if isRecovering { js.setStreamAssignmentRecovering(sa) - key := sa.recoveryKey() - ru.addStreams[key] = sa - delete(ru.removeStreams, key) + ru.addStream(sa) } else { js.processStreamAssignment(sa) } @@ -2047,16 +2172,11 @@ func (js *jetStream) applyMetaEntries(entries []*Entry, ru *recoveryUpdates) (bo sa, err := decodeStreamAssignment(js.srv, buf[1:]) if err != nil { js.srv.Errorf("JetStream cluster failed to decode stream assignment: %q", buf[1:]) - return didSnap, err + return isRecovering, didSnap, err } if isRecovering { js.setStreamAssignmentRecovering(sa) - key := sa.recoveryKey() - ru.removeStreams[key] = sa - delete(ru.addStreams, key) - delete(ru.updateStreams, key) - delete(ru.updateConsumers, key) - delete(ru.removeConsumers, key) + ru.removeStream(sa) } else { js.processStreamRemoval(sa) } @@ -2064,19 +2184,11 @@ func (js *jetStream) applyMetaEntries(entries []*Entry, ru 
*recoveryUpdates) (bo ca, err := decodeConsumerAssignment(buf[1:]) if err != nil { js.srv.Errorf("JetStream cluster failed to decode consumer assignment: %q", buf[1:]) - return didSnap, err + return isRecovering, didSnap, err } if isRecovering { js.setConsumerAssignmentRecovering(ca) - key := ca.recoveryKey() - skey := ca.streamRecoveryKey() - if consumers, ok := ru.removeConsumers[skey]; ok { - delete(consumers, key) - } - if _, ok := ru.updateConsumers[skey]; !ok { - ru.updateConsumers[skey] = map[string]*consumerAssignment{} - } - ru.updateConsumers[skey][key] = ca + ru.addOrUpdateConsumer(ca) } else { js.processConsumerAssignment(ca) } @@ -2084,19 +2196,11 @@ func (js *jetStream) applyMetaEntries(entries []*Entry, ru *recoveryUpdates) (bo ca, err := decodeConsumerAssignmentCompressed(buf[1:]) if err != nil { js.srv.Errorf("JetStream cluster failed to decode compressed consumer assignment: %q", buf[1:]) - return didSnap, err + return isRecovering, didSnap, err } if isRecovering { js.setConsumerAssignmentRecovering(ca) - key := ca.recoveryKey() - skey := ca.streamRecoveryKey() - if consumers, ok := ru.removeConsumers[skey]; ok { - delete(consumers, key) - } - if _, ok := ru.updateConsumers[skey]; !ok { - ru.updateConsumers[skey] = map[string]*consumerAssignment{} - } - ru.updateConsumers[skey][key] = ca + ru.addOrUpdateConsumer(ca) } else { js.processConsumerAssignment(ca) } @@ -2104,19 +2208,11 @@ func (js *jetStream) applyMetaEntries(entries []*Entry, ru *recoveryUpdates) (bo ca, err := decodeConsumerAssignment(buf[1:]) if err != nil { js.srv.Errorf("JetStream cluster failed to decode consumer assignment: %q", buf[1:]) - return didSnap, err + return isRecovering, didSnap, err } if isRecovering { js.setConsumerAssignmentRecovering(ca) - key := ca.recoveryKey() - skey := ca.streamRecoveryKey() - if _, ok := ru.removeConsumers[skey]; !ok { - ru.removeConsumers[skey] = map[string]*consumerAssignment{} - } - ru.removeConsumers[skey][key] = ca - if consumers, ok := 
ru.updateConsumers[skey]; ok { - delete(consumers, key) - } + ru.removeConsumer(ca) } else { js.processConsumerRemoval(ca) } @@ -2124,14 +2220,11 @@ func (js *jetStream) applyMetaEntries(entries []*Entry, ru *recoveryUpdates) (bo sa, err := decodeStreamAssignment(js.srv, buf[1:]) if err != nil { js.srv.Errorf("JetStream cluster failed to decode stream assignment: %q", buf[1:]) - return didSnap, err + return isRecovering, didSnap, err } if isRecovering { js.setStreamAssignmentRecovering(sa) - key := sa.recoveryKey() - ru.updateStreams[key] = sa - delete(ru.addStreams, key) - delete(ru.removeStreams, key) + ru.updateStream(sa) } else { js.processUpdateStreamAssignment(sa) } @@ -2140,7 +2233,7 @@ func (js *jetStream) applyMetaEntries(entries []*Entry, ru *recoveryUpdates) (bo } } } - return didSnap, nil + return isRecovering, didSnap, nil } func (rg *raftGroup) isMember(id string) bool { @@ -2614,11 +2707,14 @@ func (js *jetStream) monitorStream(mset *stream, sa *streamAssignment, sendSnaps return case <-mqch: // Clean signal from shutdown routine so do best effort attempt to snapshot. - doSnapshot() + // Don't snapshot if not shutting down, monitor goroutine could be going away + // on a scale down or a remove for example. + if s.isShuttingDown() { + doSnapshot() + } return case <-qch: - // Clean signal from shutdown routine so do best effort attempt to snapshot. - doSnapshot() + // Raft node is closed, no use in trying to snapshot. return case <-aq.ch: var ne, nb uint64 @@ -2629,6 +2725,9 @@ func (js *jetStream) monitorStream(mset *stream, sa *streamAssignment, sendSnaps for _, ce := range ces { // No special processing needed for when we are caught up on restart. if ce == nil { + if !isRecovering { + continue + } isRecovering = false // If we are interest based make sure to check consumers if interest retention policy. // This is to make sure we process any outstanding acks from all consumers. 
@@ -2713,6 +2812,9 @@ func (js *jetStream) monitorStream(mset *stream, sa *streamAssignment, sendSnaps } case isLeader = <-lch: + // Process our leader change. + js.processStreamLeaderChange(mset, isLeader) + if isLeader { if mset != nil && n != nil && sendSnapshot && !isRecovering { // If we *are* recovering at the time then this will get done when the apply queue @@ -2729,14 +2831,10 @@ func (js *jetStream) monitorStream(mset *stream, sa *streamAssignment, sendSnaps } // Always cancel if this was running. stopDirectMonitoring() - } else if !n.Leaderless() { js.setStreamAssignmentRecovering(sa) } - // Process our leader change. - js.processStreamLeaderChange(mset, isLeader) - // We may receive a leader change after the stream assignment which would cancel us // monitoring for this closely. So re-assess our state here as well. // Or the old leader is no longer part of the set and transferred leadership @@ -2844,7 +2942,7 @@ func (js *jetStream) monitorStream(mset *stream, sa *streamAssignment, sendSnaps ci := js.clusterInfo(rg) mset.checkClusterInfo(ci) - newPeers, oldPeers, newPeerSet, oldPeerSet := genPeerInfo(rg.Peers, len(rg.Peers)-replicas) + newPeers, _, newPeerSet, oldPeerSet := genPeerInfo(rg.Peers, len(rg.Peers)-replicas) // If we are part of the new peerset and we have been passed the baton. // We will handle scale down. @@ -2872,11 +2970,8 @@ func (js *jetStream) monitorStream(mset *stream, sa *streamAssignment, sendSnaps if needToWait { continue } - // We are good to go, can scale down here. - for _, p := range oldPeers { - n.ProposeRemovePeer(p) - } + n.ProposeKnownPeers(newPeers) csa := sa.copyGroup() csa.Group.Peers = newPeers @@ -3158,6 +3253,12 @@ func (js *jetStream) applyStreamEntries(mset *stream, ce *CommittedEntry, isReco mset.mu.RUnlock() for i, e := range ce.Entries { + // Ignore if lower-level catchup is started. + // We don't need to optimize during this, all entries are handled as normal. 
+ if e.Type == EntryCatchup { + continue + } + // Check if a batch is abandoned. if e.Type != EntryNormal && batch != nil && batch.id != _EMPTY_ { batch.rejectBatchState(mset) @@ -4019,7 +4120,7 @@ func (js *jetStream) processStreamAssignment(sa *streamAssignment) { js.mu.Unlock() // Need to stop the stream, we can't keep running with an old config. - acc, err := s.LookupAccount(accName) + acc, err := s.lookupOrFetchAccount(accName, isMember) if err != nil { return } @@ -4033,7 +4134,7 @@ func (js *jetStream) processStreamAssignment(sa *streamAssignment) { } js.mu.Unlock() - acc, err := s.LookupAccount(accName) + acc, err := s.lookupOrFetchAccount(accName, isMember) if err != nil { ll := fmt.Sprintf("Account [%s] lookup for stream create failed: %v", accName, err) if isMember { @@ -4148,7 +4249,7 @@ func (js *jetStream) processUpdateStreamAssignment(sa *streamAssignment) { js.mu.Unlock() // Need to stop the stream, we can't keep running with an old config. - acc, err := s.LookupAccount(accName) + acc, err := s.lookupOrFetchAccount(accName, isMember) if err != nil { return } @@ -4162,9 +4263,14 @@ func (js *jetStream) processUpdateStreamAssignment(sa *streamAssignment) { } js.mu.Unlock() - acc, err := s.LookupAccount(accName) + acc, err := s.lookupOrFetchAccount(accName, isMember) if err != nil { - s.Warnf("Update Stream Account %s, error on lookup: %v", accName, err) + ll := fmt.Sprintf("Update Stream Account %s, error on lookup: %v", accName, err) + if isMember { + s.Warnf(ll) + } else { + s.Debugf(ll) + } return } @@ -4358,7 +4464,7 @@ func (js *jetStream) processClusterCreateStream(acc *Account, sa *streamAssignme } js.mu.RLock() - s, rg := js.srv, sa.Group + s, rg, created := js.srv, sa.Group, sa.Created alreadyRunning := rg.node != nil storage := sa.Config.Storage restore := sa.Restore @@ -4457,7 +4563,7 @@ func (js *jetStream) processClusterCreateStream(acc *Account, sa *streamAssignme mset, err = acc.addStreamWithAssignment(sa.Config, nil, sa, false) } if 
mset != nil { - mset.setCreatedTime(sa.Created) + mset.setCreatedTime(created) } } @@ -4544,7 +4650,7 @@ func (js *jetStream) processClusterCreateStream(acc *Account, sa *streamAssignme mset, err = acc.lookupStream(sa.Config.Name) if mset != nil { mset.setStreamAssignment(sa) - mset.setCreatedTime(sa.Created) + mset.setCreatedTime(created) } } if err != nil { @@ -4632,12 +4738,15 @@ func (js *jetStream) processStreamRemoval(sa *streamAssignment) { js.mu.Unlock() return } - stream := sa.Config.Name - isMember := sa.Group.isMember(cc.meta.ID()) - wasLeader := cc.isStreamLeader(sa.Client.serviceAccount(), stream) + accName, stream, created := sa.Client.serviceAccount(), sa.Config.Name, sa.Created + var isMember bool + if sa.Group != nil { + isMember = sa.Group.isMember(cc.meta.ID()) + } + wasLeader := cc.isStreamLeader(accName, stream) // Check if we already have this assigned. - accStreams := cc.streams[sa.Client.serviceAccount()] + accStreams := cc.streams[accName] needDelete := accStreams != nil && accStreams[stream] != nil if needDelete { if osa := accStreams[stream]; osa != nil && osa.unsupported != nil { @@ -4649,11 +4758,22 @@ func (js *jetStream) processStreamRemoval(sa *streamAssignment) { } delete(accStreams, stream) if len(accStreams) == 0 { - delete(cc.streams, sa.Client.serviceAccount()) + delete(cc.streams, accName) } } js.mu.Unlock() + // During initial/startup recovery we'll not have registered the stream assignment, + // but might have recovered the stream from disk. We'll need to make sure that we only + // delete the stream if it wasn't created after this delete. 
+ if !needDelete && !created.IsZero() { + if acc, err := s.lookupOrFetchAccount(accName, isMember); err == nil { + if mset, err := acc.lookupStream(stream); err == nil { + needDelete = !mset.createdTime().After(created) + } + } + } + if needDelete { js.processClusterDeleteStream(sa, isMember, wasLeader) } @@ -4837,7 +4957,7 @@ func (js *jetStream) processConsumerAssignment(ca *consumerAssignment) { // Be conservative by protecting the whole stream, even if just one consumer is unsupported. // This ensures it's safe, even with Interest-based retention where it would otherwise // continue accepting but dropping messages. - acc, err := s.LookupAccount(accName) + acc, err := s.lookupOrFetchAccount(accName, isMember) if err != nil { return } @@ -4851,7 +4971,7 @@ func (js *jetStream) processConsumerAssignment(ca *consumerAssignment) { } js.mu.Unlock() - acc, err := s.LookupAccount(accName) + acc, err := s.lookupOrFetchAccount(accName, isMember) if err != nil { ll := fmt.Sprintf("Account [%s] lookup for consumer create failed: %v", accName, err) if isMember { @@ -4945,11 +5065,12 @@ func (js *jetStream) processConsumerRemoval(ca *consumerAssignment) { return } - wasLeader := cc.isConsumerLeader(ca.Client.serviceAccount(), ca.Stream, ca.Name) + accName, stream, name, created := ca.Client.serviceAccount(), ca.Stream, ca.Name, ca.Created + wasLeader := cc.isConsumerLeader(accName, stream, name) // Delete from our state. var needDelete bool - if accStreams := cc.streams[ca.Client.serviceAccount()]; accStreams != nil { + if accStreams := cc.streams[accName]; accStreams != nil { if sa := accStreams[ca.Stream]; sa != nil && sa.consumers != nil && sa.consumers[ca.Name] != nil { oca := sa.consumers[ca.Name] // Make sure this removal is for what we have, otherwise ignore. 
@@ -4966,6 +5087,19 @@ func (js *jetStream) processConsumerRemoval(ca *consumerAssignment) { } js.mu.Unlock() + // During initial/startup recovery we'll not have registered the consumer assignment, + // but might have recovered the consumer from disk. We'll need to make sure that we only + // delete the consumer if it wasn't created after this delete. + if !needDelete && !created.IsZero() { + if acc, err := s.LookupAccount(accName); err == nil { + if mset, err := acc.lookupStream(stream); err == nil { + if o := mset.lookupConsumer(name); o != nil { + needDelete = !o.createdTime().After(created) + } + } + } + } + if needDelete { js.processClusterDeleteConsumer(ca, wasLeader) } @@ -4993,7 +5127,7 @@ func (js *jetStream) processClusterCreateConsumer(ca *consumerAssignment, state acc, err := s.LookupAccount(accName) if err != nil { - s.Warnf("JetStream cluster failed to lookup axccount %q: %v", accName, err) + s.Warnf("JetStream cluster failed to lookup account %q: %v", accName, err) return } @@ -5295,7 +5429,7 @@ func (js *jetStream) processClusterDeleteConsumer(ca *consumerAssignment, wasLea } if err != nil { - resp.Error = NewJSStreamNotFoundError(Unless(err)) + resp.Error = NewJSConsumerNotFoundError(Unless(err)) s.sendAPIErrResponse(ca.Client, acc, ca.Subject, ca.Reply, _EMPTY_, s.jsonResponse(resp)) } else { resp.Success = true @@ -5512,17 +5646,23 @@ func (js *jetStream) monitorConsumer(o *consumer, ca *consumerAssignment) { return case <-mqch: // Clean signal from shutdown routine so do best effort attempt to snapshot. - doSnapshot(false) + // Don't snapshot if not shutting down, monitor goroutine could be going away + // on a scale down or a remove for example. + if s.isShuttingDown() { + doSnapshot(false) + } return case <-qch: - // Clean signal from shutdown routine so do best effort attempt to snapshot. - doSnapshot(false) + // Raft node is closed, no use in trying to snapshot. 
return case <-aq.ch: ces := aq.pop() for _, ce := range ces { // No special processing needed for when we are caught up on restart. if ce == nil { + if !recovering { + continue + } recovering = false if n.NeedSnapshot() { doSnapshot(true) @@ -5611,14 +5751,12 @@ func (js *jetStream) monitorConsumer(o *consumer, ca *consumerAssignment) { stopMigrationMonitoring() continue } - newPeers, oldPeers, newPeerSet, _ := genPeerInfo(rg.Peers, len(rg.Peers)-replicas) + newPeers, _, newPeerSet, _ := genPeerInfo(rg.Peers, len(rg.Peers)-replicas) // If we are part of the new peerset and we have been passed the baton. // We will handle scale down. if newPeerSet[ourPeerId] { - for _, p := range oldPeers { - n.ProposeRemovePeer(p) - } + n.ProposeKnownPeers(newPeers) cca := ca.copyGroup() cca.Group.Peers = newPeers cca.Group.Cluster = s.cachedClusterName() @@ -5652,6 +5790,12 @@ func (js *jetStream) monitorConsumer(o *consumer, ca *consumerAssignment) { func (js *jetStream) applyConsumerEntries(o *consumer, ce *CommittedEntry, isLeader bool) error { for _, e := range ce.Entries { + // Ignore if lower-level catchup is started. + // We don't need to optimize during this, all entries are handled as normal. + if e.Type == EntryCatchup { + continue + } + if e.Type == EntrySnapshot { if !isLeader { // No-op needed? @@ -6296,6 +6440,9 @@ func (js *jetStream) processLeaderChange(isLeader bool) { js.mu.Lock() defer js.mu.Unlock() + // Clear replies for peer-removes. 
+ js.cluster.peerRemoveReply = nil + if isLeader { if meta := js.cluster.meta; meta != nil && meta.IsObserver() { meta.StepDown() @@ -6321,7 +6468,7 @@ func (js *jetStream) processLeaderChange(isLeader bool) { } if sa.Sync == _EMPTY_ { s.Warnf("Stream assignment corrupt for stream '%s > %s'", acc, sa.Config.Name) - nsa := &streamAssignment{Group: sa.Group, Config: sa.Config, Subject: sa.Subject, Reply: sa.Reply, Client: sa.Client} + nsa := &streamAssignment{Group: sa.Group, Config: sa.Config, Subject: sa.Subject, Reply: sa.Reply, Client: sa.Client, Created: sa.Created} nsa.Sync = syncSubjForStream() cc.meta.Propose(encodeUpdateStreamAssignment(nsa)) } @@ -7436,7 +7583,7 @@ func (s *Server) jsClusteredStreamDeleteRequest(ci *ClientInfo, acc *Account, st return } - sa := &streamAssignment{Group: osa.Group, Config: osa.Config, Subject: subject, Reply: reply, Client: ci} + sa := &streamAssignment{Group: osa.Group, Config: osa.Config, Subject: subject, Reply: reply, Client: ci, Created: osa.Created} cc.meta.Propose(encodeDeleteStreamAssignment(sa)) } @@ -7911,7 +8058,7 @@ func (s *Server) jsClusteredConsumerDeleteRequest(ci *ClientInfo, acc *Account, return } oca.deleted = true - ca := &consumerAssignment{Group: oca.Group, Stream: stream, Name: consumer, Config: oca.Config, Subject: subject, Reply: reply, Client: ci} + ca := &consumerAssignment{Group: oca.Group, Stream: stream, Name: consumer, Config: oca.Config, Subject: subject, Reply: reply, Client: ci, Created: oca.Created} cc.meta.Propose(encodeDeleteConsumerAssignment(ca)) } @@ -8756,6 +8903,13 @@ func (mset *stream) stateSnapshotLocked() []byte { } // Older v1 version with deleted as a sorted []uint64. + // For a stream with millions or billions of interior deletes, this will be huge. + // Now that all server versions 2.10.+ support binary snapshots, we should never fall back. 
+ assert.Unreachable("Legacy JSON stream snapshot used", map[string]any{ + "stream": mset.cfg.Name, + "account": mset.acc.Name, + }) + state := mset.store.State() snap := &streamSnapshot{ Msgs: state.Msgs, @@ -9139,6 +9293,10 @@ func (mset *stream) processSnapshot(snap *StreamReplicatedState, index uint64) ( mset.store.FastState(&state) sreq := mset.calculateSyncRequest(&state, snap, index) + if mset.sa == nil || mset.node == nil { + mset.mu.Unlock() + return errCatchupStreamStopped + } s, js, subject, n, st := mset.srv, mset.js, mset.sa.Sync, mset.node, mset.cfg.Storage qname := fmt.Sprintf("[ACC:%s] stream '%s' snapshot", mset.acc.Name, mset.cfg.Name) mset.mu.Unlock() @@ -9881,6 +10039,7 @@ func (mset *stream) runCatchup(sendSubject string, sreq *streamSyncRequest) { } } + start := time.Now() mset.setCatchupPeer(sreq.Peer, last-seq) var spb int @@ -9889,7 +10048,7 @@ func (mset *stream) runCatchup(sendSubject string, sreq *streamSyncRequest) { sendNextBatchAndContinue := func(qch chan struct{}) bool { // Check if we know we will not enter the loop because we are done. if seq > last { - s.Noticef("Catchup for stream '%s > %s' complete", mset.account(), mset.name()) + s.Noticef("Catchup for stream '%s > %s' complete (took %v)", mset.account(), mset.name(), time.Since(start)) // EOF s.sendInternalMsgLocked(sendSubject, _EMPTY_, nil, nil) return false @@ -9958,7 +10117,7 @@ func (mset *stream) runCatchup(sendSubject string, sreq *streamSyncRequest) { // See if we should use LoadNextMsg instead of walking sequence by sequence if we have an order magnitude more interior deletes. // Only makes sense with delete range capabilities. 
- useLoadNext := drOk && (uint64(state.NumDeleted) > 10*state.Msgs) + useLoadNext := drOk && (uint64(state.NumDeleted) > 2*state.Msgs || state.NumDeleted > 1_000_000) var smv StoreMsg for ; seq <= last && atomic.LoadInt64(&outb) <= maxOutBytes && atomic.LoadInt32(&outm) <= maxOutMsgs && s.gcbBelowMax(); seq++ { @@ -9998,8 +10157,8 @@ func (mset *stream) runCatchup(sendSubject string, sreq *streamSyncRequest) { // The snapshot has a larger last sequence then we have. This could be due to a truncation // when trying to recover after corruption, still not 100% sure. Could be off by 1 too somehow, // but tested a ton of those with no success. - s.Warnf("Catchup for stream '%s > %s' completed, but requested sequence %d was larger than current state: %+v", - mset.account(), mset.name(), seq, state) + s.Warnf("Catchup for stream '%s > %s' completed (took %v), but requested sequence %d was larger than current state: %+v", + mset.account(), mset.name(), time.Since(start), seq, state) // Try our best to redo our invalidated snapshot as well. 
if n := mset.raftNode(); n != nil { if snap := mset.stateSnapshot(); snap != nil { @@ -10045,7 +10204,7 @@ func (mset *stream) runCatchup(sendSubject string, sreq *streamSyncRequest) { if drOk && dr.First > 0 { sendDR() } - s.Noticef("Catchup for stream '%s > %s' complete", mset.account(), mset.name()) + s.Noticef("Catchup for stream '%s > %s' complete (took %v)", mset.account(), mset.name(), time.Since(start)) // EOF s.sendInternalMsgLocked(sendSubject, _EMPTY_, nil, nil) return false diff --git a/vendor/github.com/nats-io/nats-server/v2/server/jetstream_errors_generated.go b/vendor/github.com/nats-io/nats-server/v2/server/jetstream_errors_generated.go index d244ebd7ac2..8baf4211c35 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/jetstream_errors_generated.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/jetstream_errors_generated.go @@ -59,6 +59,9 @@ const ( // JSClusterRequiredErr JetStream clustering support required JSClusterRequiredErr ErrorIdentifier = 10010 + // JSClusterServerMemberChangeInflightErr cluster member change is in progress + JSClusterServerMemberChangeInflightErr ErrorIdentifier = 10202 + // JSClusterServerNotMemberErr server is not a member of the cluster JSClusterServerNotMemberErr ErrorIdentifier = 10044 @@ -626,6 +629,7 @@ var ( JSClusterNotLeaderErr: {Code: 500, ErrCode: 10009, Description: "JetStream cluster can not handle request"}, JSClusterPeerNotMemberErr: {Code: 400, ErrCode: 10040, Description: "peer not a member"}, JSClusterRequiredErr: {Code: 503, ErrCode: 10010, Description: "JetStream clustering support required"}, + JSClusterServerMemberChangeInflightErr: {Code: 400, ErrCode: 10202, Description: "cluster member change is in progress"}, JSClusterServerNotMemberErr: {Code: 400, ErrCode: 10044, Description: "server is not a member of the cluster"}, JSClusterTagsErr: {Code: 400, ErrCode: 10011, Description: "tags placement not supported for operation"}, JSClusterUnSupportFeatureErr: {Code: 503, ErrCode: 10036, 
Description: "not currently supported in clustered mode"}, @@ -1031,6 +1035,16 @@ func NewJSClusterRequiredError(opts ...ErrorOption) *ApiError { return ApiErrors[JSClusterRequiredErr] } +// NewJSClusterServerMemberChangeInflightError creates a new JSClusterServerMemberChangeInflightErr error: "cluster member change is in progress" +func NewJSClusterServerMemberChangeInflightError(opts ...ErrorOption) *ApiError { + eopts := parseOpts(opts) + if ae, ok := eopts.err.(*ApiError); ok { + return ae + } + + return ApiErrors[JSClusterServerMemberChangeInflightErr] +} + // NewJSClusterServerNotMemberError creates a new JSClusterServerNotMemberErr error: "server is not a member of the cluster" func NewJSClusterServerNotMemberError(opts ...ErrorOption) *ApiError { eopts := parseOpts(opts) diff --git a/vendor/github.com/nats-io/nats-server/v2/server/leafnode.go b/vendor/github.com/nats-io/nats-server/v2/server/leafnode.go index 0049823d5cb..b939f9dc1cc 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/leafnode.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/leafnode.go @@ -1049,17 +1049,22 @@ func (c *client) sendLeafConnect(clusterName string, headers bool) error { // In addition, and this is to allow auth callout, set user/password or // token if applicable. if userInfo := c.leaf.remote.curURL.User; userInfo != nil { - // For backward compatibility, if only username is provided, set both - // Token and User, not just Token. cinfo.User = userInfo.Username() var ok bool cinfo.Pass, ok = userInfo.Password() + // For backward compatibility, if only username is provided, set both + // Token and User, not just Token. if !ok { cinfo.Token = cinfo.User } } else if c.leaf.remote.username != _EMPTY_ { cinfo.User = c.leaf.remote.username cinfo.Pass = c.leaf.remote.password + // For backward compatibility, if only username is provided, set both + // Token and User, not just Token. 
+ if cinfo.Pass == _EMPTY_ { + cinfo.Token = cinfo.User + } } b, err := json.Marshal(cinfo) if err != nil { @@ -2421,7 +2426,8 @@ func (s *Server) initLeafNodeSmapAndSendSubs(c *client) { // updateInterestForAccountOnGateway called from gateway code when processing RS+ and RS-. func (s *Server) updateInterestForAccountOnGateway(accName string, sub *subscription, delta int32) { - acc, err := s.LookupAccount(accName) + // Since we're in the gateway's readLoop, and we would otherwise block, don't allow fetching. + acc, err := s.lookupOrFetchAccount(accName, false) if acc == nil || err != nil { s.Debugf("No or bad account for %q, failed to update interest from gateway", accName) return @@ -2826,7 +2832,7 @@ func (c *client) processLeafSub(argo []byte) (err error) { // Only add in shadow subs if a new sub or qsub. if osub == nil { - if err := c.addShadowSubscriptions(acc, sub, true); err != nil { + if err := c.addShadowSubscriptions(acc, sub); err != nil { c.Errorf(err.Error()) } } diff --git a/vendor/github.com/nats-io/nats-server/v2/server/memstore.go b/vendor/github.com/nats-io/nats-server/v2/server/memstore.go index 0dbbed004ee..62555486df9 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/memstore.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/memstore.go @@ -861,17 +861,17 @@ func (ms *memStore) subjectsTotalsLocked(filterSubject string) map[string]uint64 } // NumPending will return the number of pending messages matching the filter subject starting at sequence. -func (ms *memStore) NumPending(sseq uint64, filter string, lastPerSubject bool) (total, validThrough uint64) { +func (ms *memStore) NumPending(sseq uint64, filter string, lastPerSubject bool) (total, validThrough uint64, err error) { // This needs to be a write lock, as filteredStateLocked can mutate the per-subject state. 
ms.mu.Lock() defer ms.mu.Unlock() ss := ms.filteredStateLocked(sseq, filter, lastPerSubject) - return ss.Msgs, ms.state.LastSeq + return ss.Msgs, ms.state.LastSeq, nil } // NumPending will return the number of pending messages matching any subject in the sublist starting at sequence. -func (ms *memStore) NumPendingMulti(sseq uint64, sl *gsl.SimpleSublist, lastPerSubject bool) (total, validThrough uint64) { +func (ms *memStore) NumPendingMulti(sseq uint64, sl *gsl.SimpleSublist, lastPerSubject bool) (total, validThrough uint64, err error) { if sl == nil { return ms.NumPending(sseq, fwcs, lastPerSubject) } @@ -886,7 +886,7 @@ func (ms *memStore) NumPendingMulti(sseq uint64, sl *gsl.SimpleSublist, lastPerS } // If past the end no results. if sseq > ms.state.LastSeq { - return 0, ms.state.LastSeq + return 0, ms.state.LastSeq, nil } update := func(fss *SimpleState) { @@ -924,7 +924,7 @@ func (ms *memStore) NumPendingMulti(sseq uint64, sl *gsl.SimpleSublist, lastPerS // If we did not encounter any partials we can return here. if !havePartial { - return ss.Msgs, ms.state.LastSeq + return ss.Msgs, ms.state.LastSeq, nil } // If we are here we need to scan the msgs. @@ -1015,7 +1015,7 @@ func (ms *memStore) NumPendingMulti(sseq uint64, sl *gsl.SimpleSublist, lastPerS ss.Msgs -= adjust } - return ss.Msgs, ms.state.LastSeq + return ss.Msgs, ms.state.LastSeq, nil } // Will check the msg limit for this tracked subject. @@ -1828,6 +1828,40 @@ func (ms *memStore) LoadPrevMsg(start uint64, smp *StoreMsg) (sm *StoreMsg, err return nil, ErrStoreEOF } +// LoadPrevMsgMulti will find the previous message matching any entry in the sublist. +func (ms *memStore) LoadPrevMsgMulti(sl *gsl.SimpleSublist, start uint64, smp *StoreMsg) (sm *StoreMsg, skip uint64, err error) { + // TODO(dlc) - for now simple linear walk to get started. + ms.mu.RLock() + defer ms.mu.RUnlock() + + if start > ms.state.LastSeq { + start = ms.state.LastSeq + } + + // If past the start no results. 
+ if start < ms.state.FirstSeq || ms.state.Msgs == 0 { + return nil, ms.state.FirstSeq, ErrStoreEOF + } + + // Initial setup. + fseq, lseq := start, ms.state.FirstSeq + + for nseq := fseq; nseq >= lseq; nseq-- { + sm, ok := ms.msgs[nseq] + if !ok { + continue + } + if sl.HasInterest(sm.subj) { + if smp == nil { + smp = new(StoreMsg) + } + sm.copy(smp) + return smp, nseq, nil + } + } + return nil, ms.state.LastSeq, ErrStoreEOF +} + // RemoveMsg will remove the message from this store. // Will return the number of bytes removed. func (ms *memStore) RemoveMsg(seq uint64) (bool, error) { @@ -2129,7 +2163,7 @@ type consumerMemStore struct { closed bool } -func (ms *memStore) ConsumerStore(name string, cfg *ConsumerConfig) (ConsumerStore, error) { +func (ms *memStore) ConsumerStore(name string, _ time.Time, cfg *ConsumerConfig) (ConsumerStore, error) { if ms == nil { return nil, fmt.Errorf("memstore is nil") } diff --git a/vendor/github.com/nats-io/nats-server/v2/server/monitor.go b/vendor/github.com/nats-io/nats-server/v2/server/monitor.go index 83a239d5300..10e3af057d3 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/monitor.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/monitor.go @@ -500,31 +500,31 @@ func (s *Server) Connz(opts *ConnzOptions) (*Connz, error) { switch sortOpt { case ByCid, ByStart: - sort.Sort(byCid{pconns}) + sort.Sort(SortByCid{pconns}) case BySubs: - sort.Sort(sort.Reverse(bySubs{pconns})) + sort.Sort(sort.Reverse(SortBySubs{pconns})) case ByPending: - sort.Sort(sort.Reverse(byPending{pconns})) + sort.Sort(sort.Reverse(SortByPending{pconns})) case ByOutMsgs: - sort.Sort(sort.Reverse(byOutMsgs{pconns})) + sort.Sort(sort.Reverse(SortByOutMsgs{pconns})) case ByInMsgs: - sort.Sort(sort.Reverse(byInMsgs{pconns})) + sort.Sort(sort.Reverse(SortByInMsgs{pconns})) case ByOutBytes: - sort.Sort(sort.Reverse(byOutBytes{pconns})) + sort.Sort(sort.Reverse(SortByOutBytes{pconns})) case ByInBytes: - sort.Sort(sort.Reverse(byInBytes{pconns})) 
+ sort.Sort(sort.Reverse(SortByInBytes{pconns})) case ByLast: - sort.Sort(sort.Reverse(byLast{pconns})) + sort.Sort(sort.Reverse(SortByLast{pconns})) case ByIdle: - sort.Sort(sort.Reverse(byIdle{pconns, c.Now})) + sort.Sort(sort.Reverse(SortByIdle{pconns, c.Now})) case ByUptime: - sort.Sort(byUptime{pconns, time.Now()}) + sort.Sort(SortByUptime{pconns, time.Now()}) case ByStop: - sort.Sort(sort.Reverse(byStop{pconns})) + sort.Sort(sort.Reverse(SortByStop{pconns})) case ByReason: - sort.Sort(byReason{pconns}) + sort.Sort(SortByReason{pconns}) case ByRTT: - sort.Sort(sort.Reverse(byRTT{pconns})) + sort.Sort(sort.Reverse(SortByRTT{pconns})) } minoff := c.Offset @@ -1244,6 +1244,7 @@ type Varz struct { JetStream JetStreamVarz `json:"jetstream,omitempty"` // JetStream is the JetStream state TLSTimeout float64 `json:"tls_timeout"` // TLSTimeout is how long TLS operations have to complete WriteDeadline time.Duration `json:"write_deadline"` // WriteDeadline is the maximum time writes to sockets have to complete + WriteTimeout string `json:"write_timeout,omitempty"` // WriteTimeout is the closure policy for write deadline errors Start time.Time `json:"start"` // Start is time when the server was started Now time.Time `json:"now"` // Now is the current time of the server Uptime string `json:"uptime"` // Uptime is how long the server has been running @@ -1290,15 +1291,17 @@ type JetStreamVarz struct { // ClusterOptsVarz contains monitoring cluster information type ClusterOptsVarz struct { - Name string `json:"name,omitempty"` // Name is the configured cluster name - Host string `json:"addr,omitempty"` // Host is the host the cluster listens on for connections - Port int `json:"cluster_port,omitempty"` // Port is the port the cluster listens on for connections - AuthTimeout float64 `json:"auth_timeout,omitempty"` // AuthTimeout is the time cluster connections have to complete authentication - URLs []string `json:"urls,omitempty"` // URLs is the list of cluster URLs - 
TLSTimeout float64 `json:"tls_timeout,omitempty"` // TLSTimeout is how long TLS operations have to complete - TLSRequired bool `json:"tls_required,omitempty"` // TLSRequired indicates if TLS is required for connections - TLSVerify bool `json:"tls_verify,omitempty"` // TLSVerify indicates if full verification of TLS connections is performed - PoolSize int `json:"pool_size,omitempty"` // PoolSize is the configured route connection pool size + Name string `json:"name,omitempty"` // Name is the configured cluster name + Host string `json:"addr,omitempty"` // Host is the host the cluster listens on for connections + Port int `json:"cluster_port,omitempty"` // Port is the port the cluster listens on for connections + AuthTimeout float64 `json:"auth_timeout,omitempty"` // AuthTimeout is the time cluster connections have to complete authentication + URLs []string `json:"urls,omitempty"` // URLs is the list of cluster URLs + TLSTimeout float64 `json:"tls_timeout,omitempty"` // TLSTimeout is how long TLS operations have to complete + TLSRequired bool `json:"tls_required,omitempty"` // TLSRequired indicates if TLS is required for connections + TLSVerify bool `json:"tls_verify,omitempty"` // TLSVerify indicates if full verification of TLS connections is performed + PoolSize int `json:"pool_size,omitempty"` // PoolSize is the configured route connection pool size + WriteDeadline time.Duration `json:"write_deadline,omitempty"` // WriteDeadline is the maximum time writes to sockets have to complete + WriteTimeout string `json:"write_timeout,omitempty"` // WriteTimeout is the closure policy for write deadline errors } // GatewayOptsVarz contains monitoring gateway information @@ -1314,6 +1317,8 @@ type GatewayOptsVarz struct { ConnectRetries int `json:"connect_retries,omitempty"` // ConnectRetries is how many connection attempts the route will make Gateways []RemoteGatewayOptsVarz `json:"gateways,omitempty"` // Gateways is state of configured gateway remotes RejectUnknown bool 
`json:"reject_unknown,omitempty"` // RejectUnknown indicates if unknown cluster connections will be rejected + WriteDeadline time.Duration `json:"write_deadline,omitempty"` // WriteDeadline is the maximum time writes to sockets have to complete + WriteTimeout string `json:"write_timeout,omitempty"` // WriteTimeout is the closure policy for write deadline errors } // RemoteGatewayOptsVarz contains monitoring remote gateway information @@ -1333,6 +1338,8 @@ type LeafNodeOptsVarz struct { TLSVerify bool `json:"tls_verify,omitempty"` // TLSVerify indicates if full verification of TLS connections is performed Remotes []RemoteLeafOptsVarz `json:"remotes,omitempty"` // Remotes is state of configured Leafnode remotes TLSOCSPPeerVerify bool `json:"tls_ocsp_peer_verify,omitempty"` // TLSOCSPPeerVerify indicates if OCSP verification will be performed + WriteDeadline time.Duration `json:"write_deadline,omitempty"` // WriteDeadline is the maximum time writes to sockets have to complete + WriteTimeout string `json:"write_timeout,omitempty"` // WriteTimeout is the closure policy for write deadline errors } // DenyRules Contains lists of subjects not allowed to be imported/exported @@ -1501,7 +1508,8 @@ func (s *Server) HandleRoot(w http.ResponseWriter, r *http.Request) { LeafNodes %s Gateways %s Raft Groups %s - Health Probe %s + Health Probe %s + Expvar %s Help `, @@ -1518,6 +1526,7 @@ func (s *Server) HandleRoot(w http.ResponseWriter, r *http.Request) { s.basePath(GatewayzPath), GatewayzPath, s.basePath(RaftzPath), RaftzPath, s.basePath(HealthzPath), HealthzPath, + s.basePath(ExpvarzPath), ExpvarzPath, ) } @@ -1599,14 +1608,16 @@ func (s *Server) createVarz(pcpu float64, rss int64) *Varz { HTTPBasePath: opts.HTTPBasePath, HTTPSPort: opts.HTTPSPort, Cluster: ClusterOptsVarz{ - Name: info.Cluster, - Host: c.Host, - Port: c.Port, - AuthTimeout: c.AuthTimeout, - TLSTimeout: c.TLSTimeout, - TLSRequired: clustTlsReq, - TLSVerify: clustTlsReq, - PoolSize: opts.Cluster.PoolSize, + 
Name: info.Cluster, + Host: c.Host, + Port: c.Port, + AuthTimeout: c.AuthTimeout, + TLSTimeout: c.TLSTimeout, + TLSRequired: clustTlsReq, + TLSVerify: clustTlsReq, + PoolSize: opts.Cluster.PoolSize, + WriteDeadline: opts.Cluster.WriteDeadline, + WriteTimeout: opts.Cluster.WriteTimeout.String(), }, Gateway: GatewayOptsVarz{ Name: gw.Name, @@ -1620,6 +1631,8 @@ func (s *Server) createVarz(pcpu float64, rss int64) *Varz { ConnectRetries: gw.ConnectRetries, Gateways: []RemoteGatewayOptsVarz{}, RejectUnknown: gw.RejectUnknown, + WriteDeadline: opts.Cluster.WriteDeadline, + WriteTimeout: opts.Cluster.WriteTimeout.String(), }, LeafNode: LeafNodeOptsVarz{ Host: ln.Host, @@ -1630,6 +1643,8 @@ func (s *Server) createVarz(pcpu float64, rss int64) *Varz { TLSVerify: leafTlsVerify, TLSOCSPPeerVerify: leafTlsOCSPPeerVerify, Remotes: []RemoteLeafOptsVarz{}, + WriteDeadline: opts.Cluster.WriteDeadline, + WriteTimeout: opts.Cluster.WriteTimeout.String(), }, MQTT: MQTTOptsVarz{ Host: mqtt.Host, @@ -1746,6 +1761,7 @@ func (s *Server) updateVarzConfigReloadableFields(v *Varz) { v.MaxPending = opts.MaxPending v.TLSTimeout = opts.TLSTimeout v.WriteDeadline = opts.WriteDeadline + v.WriteTimeout = opts.WriteTimeout.String() v.ConfigLoadTime = s.configTime.UTC() v.ConfigDigest = opts.configDigest v.Tags = opts.Tags @@ -2886,6 +2902,7 @@ type JSzOptions struct { Accounts bool `json:"accounts,omitempty"` Streams bool `json:"streams,omitempty"` Consumer bool `json:"consumer,omitempty"` + DirectConsumer bool `json:"direct_consumer,omitempty"` Config bool `json:"config,omitempty"` LeaderOnly bool `json:"leader_only,omitempty"` Offset int `json:"offset,omitempty"` @@ -2934,6 +2951,7 @@ type StreamDetail struct { Config *StreamConfig `json:"config,omitempty"` State StreamState `json:"state,omitempty"` Consumer []*ConsumerInfo `json:"consumer_detail,omitempty"` + DirectConsumer []*ConsumerInfo `json:"direct_consumer_detail,omitempty"` Mirror *StreamSourceInfo `json:"mirror,omitempty"` Sources 
[]*StreamSourceInfo `json:"sources,omitempty"` RaftGroup string `json:"stream_raft_group,omitempty"` @@ -2953,14 +2971,23 @@ type AccountDetail struct { Streams []StreamDetail `json:"stream_detail,omitempty"` } +// MetaSnapshotStats shows information about meta snapshots. +type MetaSnapshotStats struct { + PendingEntries uint64 `json:"pending_entries"` // PendingEntries is the count of pending entries in the meta layer + PendingSize uint64 `json:"pending_size"` // PendingSize is the size in bytes of pending entries in the meta layer + LastTime time.Time `json:"last_time,omitempty"` // LastTime is when the last meta snapshot was taken + LastDuration time.Duration `json:"last_duration,omitempty"` // LastDuration is how long the last meta snapshot took +} + // MetaClusterInfo shows information about the meta group. type MetaClusterInfo struct { - Name string `json:"name,omitempty"` // Name is the name of the cluster - Leader string `json:"leader,omitempty"` // Leader is the server name of the cluster leader - Peer string `json:"peer,omitempty"` // Peer is unique ID of the leader - Replicas []*PeerInfo `json:"replicas,omitempty"` // Replicas is a list of known peers - Size int `json:"cluster_size"` // Size is the known size of the cluster - Pending int `json:"pending"` // Pending is how many RAFT messages are not yet processed + Name string `json:"name,omitempty"` // Name is the name of the cluster + Leader string `json:"leader,omitempty"` // Leader is the server name of the cluster leader + Peer string `json:"peer,omitempty"` // Peer is unique ID of the leader + Replicas []*PeerInfo `json:"replicas,omitempty"` // Replicas is a list of known peers + Size int `json:"cluster_size"` // Size is the known size of the cluster + Pending int `json:"pending"` // Pending is how many RAFT messages are not yet processed + Snapshot *MetaSnapshotStats `json:"snapshot"` // Snapshot contains meta snapshot statistics } // JSInfo has detailed information on JetStream. 
@@ -2982,7 +3009,7 @@ type JSInfo struct { Total int `json:"total"` } -func (s *Server) accountDetail(jsa *jsAccount, optStreams, optConsumers, optCfg, optRaft, optStreamLeader bool) *AccountDetail { +func (s *Server) accountDetail(jsa *jsAccount, optStreams, optConsumers, optDirectConsumers, optCfg, optRaft, optStreamLeader bool) *AccountDetail { jsa.mu.RLock() acc := jsa.account name := acc.GetName() @@ -3064,6 +3091,18 @@ func (s *Server) accountDetail(jsa *jsAccount, optStreams, optConsumers, optCfg, } } } + if optDirectConsumers { + for _, consumer := range stream.getDirectConsumers() { + cInfo := consumer.info() + if cInfo == nil { + continue + } + if !optCfg { + cInfo.Config = nil + } + sdet.DirectConsumer = append(sdet.Consumer, cInfo) + } + } } detail.Streams = append(detail.Streams, sdet) } @@ -3087,7 +3126,7 @@ func (s *Server) JszAccount(opts *JSzOptions) (*AccountDetail, error) { if !ok { return nil, fmt.Errorf("account %q not jetstream enabled", acc) } - return s.accountDetail(jsa, opts.Streams, opts.Consumer, opts.Config, opts.RaftGroups, opts.StreamLeaderOnly), nil + return s.accountDetail(jsa, opts.Streams, opts.Consumer, opts.DirectConsumer, opts.Config, opts.RaftGroups, opts.StreamLeaderOnly), nil } // helper to get cluster info from node via dummy group @@ -3165,6 +3204,7 @@ func (s *Server) Jsz(opts *JSzOptions) (*JSInfo, error) { if mg := js.getMetaGroup(); mg != nil { if ci := s.raftNodeToClusterInfo(mg); ci != nil { + entries, bytes := mg.Size() jsi.Meta = &MetaClusterInfo{Name: ci.Name, Leader: ci.Leader, Peer: getHash(ci.Leader), Size: mg.ClusterSize()} if isLeader { jsi.Meta.Replicas = ci.Replicas @@ -3172,6 +3212,24 @@ func (s *Server) Jsz(opts *JSzOptions) (*JSInfo, error) { if ipq := s.jsAPIRoutedReqs; ipq != nil { jsi.Meta.Pending = ipq.len() } + // Add meta snapshot stats + jsi.Meta.Snapshot = &MetaSnapshotStats{ + PendingEntries: entries, + PendingSize: bytes, + } + js.mu.RLock() + cluster := js.cluster + js.mu.RUnlock() + if 
cluster != nil { + timeNanos := atomic.LoadInt64(&cluster.lastMetaSnapTime) + durationNanos := atomic.LoadInt64(&cluster.lastMetaSnapDuration) + if timeNanos > 0 { + jsi.Meta.Snapshot.LastTime = time.Unix(0, timeNanos).UTC() + } + if durationNanos > 0 { + jsi.Meta.Snapshot.LastDuration = time.Duration(durationNanos) + } + } } } @@ -3236,7 +3294,7 @@ func (s *Server) Jsz(opts *JSzOptions) (*JSInfo, error) { jsi.AccountDetails = make([]*AccountDetail, 0, len(accounts)) for _, jsa := range accounts { - detail := s.accountDetail(jsa, opts.Streams, opts.Consumer, opts.Config, opts.RaftGroups, opts.StreamLeaderOnly) + detail := s.accountDetail(jsa, opts.Streams, opts.Consumer, opts.DirectConsumer, opts.Config, opts.RaftGroups, opts.StreamLeaderOnly) jsi.AccountDetails = append(jsi.AccountDetails, detail) } } @@ -3261,6 +3319,10 @@ func (s *Server) HandleJsz(w http.ResponseWriter, r *http.Request) { if err != nil { return } + directConsumers, err := decodeBool(w, r, "direct-consumers") + if err != nil { + return + } config, err := decodeBool(w, r, "config") if err != nil { return @@ -3292,6 +3354,7 @@ func (s *Server) HandleJsz(w http.ResponseWriter, r *http.Request) { Accounts: accounts, Streams: streams, Consumer: consumers, + DirectConsumer: directConsumers, Config: config, LeaderOnly: leader, Offset: offset, diff --git a/vendor/github.com/nats-io/nats-server/v2/server/monitor_sort_opts.go b/vendor/github.com/nats-io/nats-server/v2/server/monitor_sort_opts.go index 3a2a0b667ab..79152f8a776 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/monitor_sort_opts.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/monitor_sort_opts.go @@ -50,64 +50,64 @@ const ( // Individual sort options provide the Less for sort.Interface. Len and Swap are on cList. 
// CID -type byCid struct{ ConnInfos } +type SortByCid struct{ ConnInfos } -func (l byCid) Less(i, j int) bool { return l.ConnInfos[i].Cid < l.ConnInfos[j].Cid } +func (l SortByCid) Less(i, j int) bool { return l.ConnInfos[i].Cid < l.ConnInfos[j].Cid } // Number of Subscriptions -type bySubs struct{ ConnInfos } +type SortBySubs struct{ ConnInfos } -func (l bySubs) Less(i, j int) bool { return l.ConnInfos[i].NumSubs < l.ConnInfos[j].NumSubs } +func (l SortBySubs) Less(i, j int) bool { return l.ConnInfos[i].NumSubs < l.ConnInfos[j].NumSubs } // Pending Bytes -type byPending struct{ ConnInfos } +type SortByPending struct{ ConnInfos } -func (l byPending) Less(i, j int) bool { return l.ConnInfos[i].Pending < l.ConnInfos[j].Pending } +func (l SortByPending) Less(i, j int) bool { return l.ConnInfos[i].Pending < l.ConnInfos[j].Pending } // Outbound Msgs -type byOutMsgs struct{ ConnInfos } +type SortByOutMsgs struct{ ConnInfos } -func (l byOutMsgs) Less(i, j int) bool { return l.ConnInfos[i].OutMsgs < l.ConnInfos[j].OutMsgs } +func (l SortByOutMsgs) Less(i, j int) bool { return l.ConnInfos[i].OutMsgs < l.ConnInfos[j].OutMsgs } // Inbound Msgs -type byInMsgs struct{ ConnInfos } +type SortByInMsgs struct{ ConnInfos } -func (l byInMsgs) Less(i, j int) bool { return l.ConnInfos[i].InMsgs < l.ConnInfos[j].InMsgs } +func (l SortByInMsgs) Less(i, j int) bool { return l.ConnInfos[i].InMsgs < l.ConnInfos[j].InMsgs } // Outbound Bytes -type byOutBytes struct{ ConnInfos } +type SortByOutBytes struct{ ConnInfos } -func (l byOutBytes) Less(i, j int) bool { return l.ConnInfos[i].OutBytes < l.ConnInfos[j].OutBytes } +func (l SortByOutBytes) Less(i, j int) bool { return l.ConnInfos[i].OutBytes < l.ConnInfos[j].OutBytes } // Inbound Bytes -type byInBytes struct{ ConnInfos } +type SortByInBytes struct{ ConnInfos } -func (l byInBytes) Less(i, j int) bool { return l.ConnInfos[i].InBytes < l.ConnInfos[j].InBytes } +func (l SortByInBytes) Less(i, j int) bool { return l.ConnInfos[i].InBytes < 
l.ConnInfos[j].InBytes } // Last Activity -type byLast struct{ ConnInfos } +type SortByLast struct{ ConnInfos } -func (l byLast) Less(i, j int) bool { +func (l SortByLast) Less(i, j int) bool { return l.ConnInfos[i].LastActivity.UnixNano() < l.ConnInfos[j].LastActivity.UnixNano() } // Idle time -type byIdle struct { +type SortByIdle struct { ConnInfos now time.Time } -func (l byIdle) Less(i, j int) bool { +func (l SortByIdle) Less(i, j int) bool { return l.now.Sub(l.ConnInfos[i].LastActivity) < l.now.Sub(l.ConnInfos[j].LastActivity) } // Uptime -type byUptime struct { +type SortByUptime struct { ConnInfos now time.Time } -func (l byUptime) Less(i, j int) bool { +func (l SortByUptime) Less(i, j int) bool { ci := l.ConnInfos[i] cj := l.ConnInfos[j] var upi, upj time.Duration @@ -125,25 +125,25 @@ func (l byUptime) Less(i, j int) bool { } // Stop -type byStop struct{ ConnInfos } +type SortByStop struct{ ConnInfos } -func (l byStop) Less(i, j int) bool { +func (l SortByStop) Less(i, j int) bool { ciStop := l.ConnInfos[i].Stop cjStop := l.ConnInfos[j].Stop return ciStop.Before(*cjStop) } // Reason -type byReason struct{ ConnInfos } +type SortByReason struct{ ConnInfos } -func (l byReason) Less(i, j int) bool { +func (l SortByReason) Less(i, j int) bool { return l.ConnInfos[i].Reason < l.ConnInfos[j].Reason } // RTT - Default is descending -type byRTT struct{ ConnInfos } +type SortByRTT struct{ ConnInfos } -func (l byRTT) Less(i, j int) bool { return l.ConnInfos[i].rtt < l.ConnInfos[j].rtt } +func (l SortByRTT) Less(i, j int) bool { return l.ConnInfos[i].rtt < l.ConnInfos[j].rtt } // IsValid determines if a sort option is valid func (s SortOpt) IsValid() bool { diff --git a/vendor/github.com/nats-io/nats-server/v2/server/mqtt.go b/vendor/github.com/nats-io/nats-server/v2/server/mqtt.go index f3646ed9c3c..87bfd551065 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/mqtt.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/mqtt.go @@ -28,9 +28,11 @@ import 
( "strconv" "strings" "sync" + "sync/atomic" "time" "unicode/utf8" + "github.com/nats-io/jwt/v2" "github.com/nats-io/nuid" ) @@ -102,35 +104,38 @@ const ( // wildcard '#' semantic. mqttMultiLevelSidSuffix = " fwc" + // This is the prefix used for all subjects used by MQTT code. + mqttPrefix = "$MQTT." + // This is the prefix for NATS subscriptions subjects associated as delivery // subject of JS consumer. We want to make them unique so will prevent users // MQTT subscriptions to start with this. - mqttSubPrefix = "$MQTT.sub." + mqttSubPrefix = mqttPrefix + "sub." // Stream name for MQTT messages on a given account mqttStreamName = "$MQTT_msgs" - mqttStreamSubjectPrefix = "$MQTT.msgs." + mqttStreamSubjectPrefix = mqttPrefix + "msgs." // Stream name for MQTT retained messages on a given account mqttRetainedMsgsStreamName = "$MQTT_rmsgs" - mqttRetainedMsgsStreamSubject = "$MQTT.rmsgs." + mqttRetainedMsgsStreamSubject = mqttPrefix + "rmsgs." // Stream name for MQTT sessions on a given account mqttSessStreamName = "$MQTT_sess" - mqttSessStreamSubjectPrefix = "$MQTT.sess." + mqttSessStreamSubjectPrefix = mqttPrefix + "sess." // Stream name prefix for MQTT sessions on a given account mqttSessionsStreamNamePrefix = "$MQTT_sess_" // Stream name and subject for incoming MQTT QoS2 messages mqttQoS2IncomingMsgsStreamName = "$MQTT_qos2in" - mqttQoS2IncomingMsgsStreamSubjectPrefix = "$MQTT.qos2.in." + mqttQoS2IncomingMsgsStreamSubjectPrefix = mqttPrefix + "qos2.in." // Stream name and subjects for outgoing MQTT QoS (PUBREL) messages mqttOutStreamName = "$MQTT_out" - mqttOutSubjectPrefix = "$MQTT.out." - mqttPubRelSubjectPrefix = "$MQTT.out.pubrel." - mqttPubRelDeliverySubjectPrefix = "$MQTT.deliver.pubrel." + mqttOutSubjectPrefix = mqttPrefix + "out." + mqttPubRelSubjectPrefix = mqttPrefix + "out.pubrel." + mqttPubRelDeliverySubjectPrefix = mqttPrefix + "deliver.pubrel." 
mqttPubRelConsumerDurablePrefix = "$MQTT_PUBREL_" // As per spec, MQTT server may not redeliver QoS 1 and 2 messages to @@ -148,7 +153,7 @@ const ( mqttMaxAckTotalLimit = 0xFFFF // Prefix of the reply subject for JS API requests. - mqttJSARepliesPrefix = "$MQTT.JSA." + mqttJSARepliesPrefix = mqttPrefix + "JSA." // Those are tokens that are used for the reply subject of JS API requests. // For instance "$MQTT.JSA..SC." is the reply subject @@ -190,6 +195,7 @@ const ( mqttDefaultRetainedCacheTTL = 2 * time.Minute mqttRetainedTransferTimeout = 10 * time.Second mqttDefaultJSAPITimeout = 5 * time.Second + mqttRetainedFlagDelMarker = '-' ) const ( @@ -234,6 +240,7 @@ var ( errMQTTPacketIdentifierIsZero = errors.New("packet identifier cannot be 0") errMQTTUnsupportedCharacters = errors.New("character ' ' not supported for MQTT topics") errMQTTInvalidSession = errors.New("invalid MQTT session") + errMQTTInvalidRetainFlags = errors.New("invalid retained message flags") ) type srvMQTT struct { @@ -248,8 +255,6 @@ type mqttSessionManager struct { sessions map[string]*mqttAccountSessionManager // key is account name } -var testDisableRMSCache = false - type mqttAccountSessionManager struct { mu sync.RWMutex sessions map[string]*mqttSession // key is MQTT client ID @@ -261,9 +266,7 @@ type mqttAccountSessionManager struct { retmsgs map[string]*mqttRetainedMsgRef // retained messages rmsCache *sync.Map // map[subject]mqttRetainedMsg jsa mqttJSA - rrmLastSeq uint64 // Restore retained messages expected last sequence - rrmDoneCh chan struct{} // To notify the caller that all retained messages have been loaded - domainTk string // Domain (with trailing "."), or possibly empty. This is added to session subject. + domainTk string // Domain (with trailing "."), or possibly empty. This is added to session subject. 
} type mqttJSAResponse struct { @@ -361,9 +364,8 @@ type mqttRetainedMsg struct { } type mqttRetainedMsgRef struct { - sseq uint64 - floor uint64 - sub *subscription + sseq uint64 + sub *subscription } // mqttSub contains fields associated with a MQTT subscription, and is added to @@ -1182,9 +1184,7 @@ func (s *Server) mqttCreateAccountSessionManager(acc *Account, quitCh chan struc quitCh: quitCh, timeout: mqttJSAPITimeout, }, - } - if !testDisableRMSCache { - as.rmsCache = &sync.Map{} + rmsCache: &sync.Map{}, } // TODO record domain name in as here @@ -1280,12 +1280,10 @@ func (s *Server) mqttCreateAccountSessionManager(acc *Account, quitCh chan struc }) // Start the go routine that will clean up cached retained messages that expired. - if as.rmsCache != nil { - s.startGoRoutine(func() { - defer s.grWG.Done() - as.cleanupRetainedMessageCache(s, closeCh) - }) - } + s.startGoRoutine(func() { + defer s.grWG.Done() + as.cleanupRetainedMessageCache(s, closeCh) + }) lookupStream := func(stream, txt string) (*StreamInfo, error) { si, err := jsa.lookupStream(stream) @@ -1473,18 +1471,6 @@ func (s *Server) mqttCreateAccountSessionManager(acc *Account, quitCh chan struc return nil, err } - var lastSeq uint64 - var rmDoneCh chan struct{} - st := si.State - if st.Msgs > 0 { - lastSeq = st.LastSeq - if lastSeq > 0 { - rmDoneCh = make(chan struct{}) - as.rrmLastSeq = lastSeq - as.rrmDoneCh = rmDoneCh - } - } - // Opportunistically delete the old (legacy) consumer, from v2.10.10 and // before. Ignore any errors that might arise. 
rmLegacyDurName := mqttRetainedMsgsStreamName + "_" + jsa.id @@ -1507,19 +1493,6 @@ func (s *Server) mqttCreateAccountSessionManager(acc *Account, quitCh chan struc return nil, fmt.Errorf("create retained messages consumer for account %q: %v", accName, err) } - if lastSeq > 0 { - ttl := time.NewTimer(mqttJSAPITimeout) - defer ttl.Stop() - - select { - case <-rmDoneCh: - case <-ttl.C: - s.Warnf("Timing out waiting to load %v retained messages", st.Msgs) - case <-quitCh: - return nil, ErrServerNotRunning - } - } - // Set this so that on defer we don't cleanup. success = true @@ -1674,8 +1647,7 @@ func (jsa *mqttJSA) newRequestExMulti(kind, subject, cidHash string, hdrs []int, } func (jsa *mqttJSA) sendAck(ackSubject string) { - // We pass -1 for the hdr so that the send loop does not need to - // add the "client info" header. This is not a JS API request per se. + // Send to the ack subject with no payload. jsa.sendMsg(ackSubject, nil) } @@ -1683,6 +1655,8 @@ func (jsa *mqttJSA) sendMsg(subj string, msg []byte) { if subj == _EMPTY_ { return } + // We pass -1 for the hdr so that the send loop does not need to + // add the "client info" header. This is not a JS API request per se. 
jsa.sendq.push(&mqttJSPubMsg{subj: subj, msg: msg, hdr: -1}) } @@ -1840,12 +1814,16 @@ func (jsa *mqttJSA) loadMsg(streamName string, seq uint64) (*StoredMsg, error) { return lmr.Message, lmr.ToError() } -func (jsa *mqttJSA) storeMsg(subject string, headers int, msg []byte) (*JSPubAckResponse, error) { - return jsa.storeMsgWithKind(mqttJSAMsgStore, subject, headers, msg) +func (jsa *mqttJSA) storeMsgNoWait(subject string, hdrLen int, msg []byte) { + jsa.sendq.push(&mqttJSPubMsg{ + subj: subject, + msg: msg, + hdr: hdrLen, + }) } -func (jsa *mqttJSA) storeMsgWithKind(kind, subject string, headers int, msg []byte) (*JSPubAckResponse, error) { - smri, err := jsa.newRequest(kind, subject, headers, msg) +func (jsa *mqttJSA) storeMsg(subject string, headers int, msg []byte) (*JSPubAckResponse, error) { + smri, err := jsa.newRequest(mqttJSAMsgStore, subject, headers, msg) if err != nil { return nil, err } @@ -1992,35 +1970,39 @@ func (as *mqttAccountSessionManager) processJSAPIReplies(_ *subscription, pc *cl // No lock held on entry. func (as *mqttAccountSessionManager) processRetainedMsg(_ *subscription, c *client, _ *Account, subject, reply string, rmsg []byte) { h, m := c.msgParts(rmsg) - rm, err := mqttDecodeRetainedMessage(h, m) + // We need to strip the trailing "\r\n". + if l := len(m); l >= LEN_CR_LF { + m = m[:l-LEN_CR_LF] + } + rm, err := mqttDecodeRetainedMessage(subject, h, m) if err != nil { return } - // If lastSeq is 0 (nothing to recover, or done doing it) and this is - // from our own server, ignore. - as.mu.RLock() - if as.rrmLastSeq == 0 && rm.Origin == as.jsa.id { - as.mu.RUnlock() - return - } - as.mu.RUnlock() - - // At this point we either recover from our own server, or process a remote retained message. + // The as.jsa.id is immutable, so no need to have a rlock here. + local := rm.Origin == as.jsa.id + // Get the stream sequence for this message. seq, _, _ := ackReplyInfo(reply) - - // Handle this retained message, no need to copy the bytes. 
- as.handleRetainedMsg(rm.Subject, &mqttRetainedMsgRef{sseq: seq}, rm, false) - - // If we were recovering (lastSeq > 0), then check if we are done. - as.mu.Lock() - if as.rrmLastSeq > 0 && seq >= as.rrmLastSeq { - as.rrmLastSeq = 0 - close(as.rrmDoneCh) - as.rrmDoneCh = nil + if len(m) == 0 { + // An empty payload means that we need to remove the retained message. + rmSeq := as.removeRetainedMsg(rm.Subject, 0) + if local { + if rmSeq > 0 { + // This is for backward compatibility reasons. + // Should be removed in a future release. + as.notifyRetainedMsgDeleted(rm.Subject, rmSeq) + } + // Delete this very message we just processed, we don't need it anymore. + as.deleteRetainedMsg(seq) + } + } else { + // Add this retained message. The `rm.Msg` references some buffer that we + // don't own. But addRetainedMsg() will take care of making a copy of + // `rm.Msg` it `rm` ends-up being stored in the cache. + as.addRetainedMsg(rm.Subject, &mqttRetainedMsgRef{sseq: seq}, rm) } - as.mu.Unlock() } +// NOTE: This is maintained for backward compatibility reasons. Should be removed in 2.14/2.15? 
func (as *mqttAccountSessionManager) processRetainedMsgDel(_ *subscription, c *client, _ *Account, subject, reply string, rmsg []byte) { idHash := tokenAt(subject, 3) if idHash == _EMPTY_ || idHash == as.jsa.id { @@ -2034,7 +2016,7 @@ func (as *mqttAccountSessionManager) processRetainedMsgDel(_ *subscription, c *c if err := json.Unmarshal(msg, &drm); err != nil { return } - as.handleRetainedMsgDel(drm.Subject, drm.Seq) + as.removeRetainedMsg(drm.Subject, drm.Seq) } // This will receive all JS API replies for a request to store a session record, @@ -2199,6 +2181,7 @@ func (as *mqttAccountSessionManager) sendJSAPIrequests(s *Server, c *client, acc sendq := as.jsa.sendq quitCh := as.jsa.quitCh ci := ClientInfo{Account: accName, Cluster: cluster} + acc := c.acc as.mu.RUnlock() // The account session manager does not have a suhtdown API per-se, instead, @@ -2259,7 +2242,13 @@ func (as *mqttAccountSessionManager) sendJSAPIrequests(s *Server, c *client, acc c.pa.reply = []byte(r.reply) c.pa.size = nsize c.pa.szb = []byte(strconv.Itoa(nsize)) + c.pa.mapped = nil + if acc.hasMappings() { + if changed := c.selectMappedSubject(); changed { + c.traceOutOp("MAPPINGS", fmt.Appendf(nil, "%s -> %s", c.pa.mapped, c.pa.subject)) + } + } c.processInboundClientMsg(msg) c.flushClients(0) } @@ -2277,7 +2266,7 @@ func (as *mqttAccountSessionManager) sendJSAPIrequests(s *Server, c *client, acc // If a message for this topic already existed, the existing record is updated // with the provided information. // Lock not held on entry. 
-func (as *mqttAccountSessionManager) handleRetainedMsg(key string, rf *mqttRetainedMsgRef, rm *mqttRetainedMsg, copyBytesToCache bool) { +func (as *mqttAccountSessionManager) addRetainedMsg(key string, rf *mqttRetainedMsgRef, rm *mqttRetainedMsg) { as.mu.Lock() defer as.mu.Unlock() if as.retmsgs == nil { @@ -2285,72 +2274,45 @@ func (as *mqttAccountSessionManager) handleRetainedMsg(key string, rf *mqttRetai as.sl = NewSublistWithCache() } else { // Check if we already had one retained message. If so, update the existing one. - if erm, exists := as.retmsgs[key]; exists { - // If the new sequence is below the floor or the existing one, - // then ignore the new one. - if rf.sseq <= erm.sseq || rf.sseq <= erm.floor { - return - } - // Capture existing sequence number so we can return it as the old sequence. - erm.sseq = rf.sseq - // Clear the floor - erm.floor = 0 - // If sub is nil, it means that it was removed from sublist following a - // network delete. So need to add it now. - if erm.sub == nil { - erm.sub = &subscription{subject: []byte(key)} - as.sl.Insert(erm.sub) - } - + if erf, exists := as.retmsgs[key]; exists { + // Update the stream sequence with the new value. + erf.sseq = rf.sseq // Update the in-memory retained message cache but only for messages // that are already in the cache, i.e. have been (recently) used. - as.setCachedRetainedMsg(key, rm, true, copyBytesToCache) + // If that is the case, we ask setCachedRetainedMsg() to make a copy + // of rm.Msg bytes slice. + as.setCachedRetainedMsg(key, rm, true, true) return } } - rf.sub = &subscription{subject: []byte(key)} as.retmsgs[key] = rf as.sl.Insert(rf.sub) } -// Removes the retained message for the given `subject` if present, and returns the -// stream sequence it was stored at. It will be 0 if no retained message was removed. -// If a sequence is passed and not 0, then the retained message will be removed only -// if the given sequence is equal or higher to what is stored. 
-// -// No lock held on entry. -func (as *mqttAccountSessionManager) handleRetainedMsgDel(subject string, seq uint64) uint64 { - var seqToRemove uint64 +// Remove the retained message stored with the `subject` key from the map/cache. +// When invoked from the retained message stream's consumer, this function will +// be called with `seq == 0`, this is because add/remove are serialized in this +// stream and so the request is to remove the current retained message. +// But in some conditions, we will invoke this function from some other places +// with `seq > 0` which means that the retained message will be removed only if +// its sequence is the same than the provided one. +// This function returns the sequence associated with the existing retained +// message that is being removed (used with `seq == 0`) and returns 0 if the +// retained message was not removed from the map (not found or sequence did not +// match). +func (as *mqttAccountSessionManager) removeRetainedMsg(subject string, seq uint64) uint64 { as.mu.Lock() - if as.retmsgs == nil { - as.retmsgs = make(map[string]*mqttRetainedMsgRef) - as.sl = NewSublistWithCache() + defer as.mu.Unlock() + rm, ok := as.retmsgs[subject] + if !ok || (seq > 0 && rm.sseq != seq) { + return 0 } - if erm, ok := as.retmsgs[subject]; ok { - if as.rmsCache != nil { - as.rmsCache.Delete(subject) - } - if erm.sub != nil { - as.sl.Remove(erm.sub) - erm.sub = nil - } - // If processing a delete request from the network, then seq will be > 0. - // If that is the case and it is greater or equal to what we have, we need - // to record the floor for this subject. 
- if seq != 0 && seq >= erm.sseq { - erm.sseq = 0 - erm.floor = seq - } else if seq == 0 { - delete(as.retmsgs, subject) - seqToRemove = erm.sseq - } - } else if seq != 0 { - rf := &mqttRetainedMsgRef{floor: seq} - as.retmsgs[subject] = rf - } - as.mu.Unlock() - return seqToRemove + seq = rm.sseq + as.rmsCache.Delete(subject) + delete(as.retmsgs, subject) + as.sl.Remove(rm.sub) + return seq } // First check if this session's client ID is already in the "locked" map, @@ -2476,9 +2438,9 @@ func (sess *mqttSession) processSub( } if len(rms) > 0 { - for _, ss := range subs { - as.serializeRetainedMsgsForSub(rms, sess, c, ss, trace) - } + // Only deal with retained messages for the normal subscription, + // not the shadow one (which is for a different account and subject). + as.serializeRetainedMsgsForSub(rms, sess, c, sub, trace) } return sub, nil @@ -2501,10 +2463,6 @@ func (sess *mqttSession) processSub( func (as *mqttAccountSessionManager) processSubs(sess *mqttSession, c *client, filters []*mqttFilter, fromSubProto, trace bool) ([]*subscription, error) { - c.mu.Lock() - acc := c.acc - c.mu.Unlock() - // Helper to determine if we need to create a separate top-level // subscription for a wildcard. fwc := func(subject string) (bool, string, string) { @@ -2519,7 +2477,7 @@ func (as *mqttAccountSessionManager) processSubs(sess *mqttSession, c *client, return true, fwcsubject, fwcsid } - rmSubjects := map[string]struct{}{} + rmSubjects := map[string]uint64{} // Preload retained messages for all requested subscriptions. Also, since // it's the first iteration over the filter list, do some cleanup. for _, f := range filters { @@ -2551,43 +2509,16 @@ func (as *mqttAccountSessionManager) processSubs(sess *mqttSession, c *client, // Find retained messages. 
if fromSubProto { - addRMSubjects := func(subject string) error { - sub := &subscription{ - client: c, - subject: []byte(subject), - sid: []byte(subject), - } - if err := c.addShadowSubscriptions(acc, sub, false); err != nil { - return err - } - - for _, sub := range append([]*subscription{sub}, sub.shadow...) { - as.addRetainedSubjectsForSubject(rmSubjects, bytesToString(sub.subject)) - for _, ss := range sub.shadow { - as.addRetainedSubjectsForSubject(rmSubjects, bytesToString(ss.subject)) - } - } - return nil - } - - if err := addRMSubjects(f.filter); err != nil { - f.qos = mqttSubAckFailure - continue - } + as.addRetainedSubjectsForSubject(rmSubjects, f.filter) if need, subject, _ := fwc(f.filter); need { - if err := addRMSubjects(subject); err != nil { - f.qos = mqttSubAckFailure - continue - } + as.addRetainedSubjectsForSubject(rmSubjects, subject) } } } - serializeRMS := len(rmSubjects) > 0 var rms map[string]*mqttRetainedMsg - if serializeRMS { - // Make the best effort to load retained messages. We will identify - // errors in the next pass. + if len(rmSubjects) > 0 { + // Make the best effort to load retained messages. rms = as.loadRetainedMessages(rmSubjects, c) } @@ -2708,13 +2639,13 @@ func (as *mqttAccountSessionManager) processSubs(sess *mqttSession, c *client, // Runs from the client's readLoop. // Account session manager lock held on entry. // Session lock held on entry. 
-func (as *mqttAccountSessionManager) serializeRetainedMsgsForSub(rms map[string]*mqttRetainedMsg, sess *mqttSession, c *client, sub *subscription, trace bool) error { +func (as *mqttAccountSessionManager) serializeRetainedMsgsForSub(rms map[string]*mqttRetainedMsg, sess *mqttSession, c *client, sub *subscription, trace bool) { if len(as.retmsgs) == 0 || len(rms) == 0 { - return nil + return } result := as.sl.ReverseMatch(string(sub.subject)) if len(result.psubs) == 0 { - return nil + return } toTrace := []mqttPublish{} for _, psub := range result.psubs { @@ -2726,10 +2657,7 @@ func (as *mqttAccountSessionManager) serializeRetainedMsgsForSub(rms map[string] continue } var pi uint16 - qos := mqttGetQoS(rm.Flags) - if qos > sub.mqtt.qos { - qos = sub.mqtt.qos - } + qos := min(mqttGetQoS(rm.Flags), sub.mqtt.qos) if c.mqtt.rejectQoS2Pub && qos == 2 { c.Warnf("Rejecting retained message with QoS2 for subscription %q, as configured", sub.subject) continue @@ -2763,33 +2691,35 @@ func (as *mqttAccountSessionManager) serializeRetainedMsgsForSub(rms map[string] for _, pp := range toTrace { c.traceOutOp("PUBLISH", []byte(mqttPubTrace(&pp))) } - return nil } // Appends the stored message subjects for all retained message records that // match the given subscription's `subject` (which could have wildcards). // // Account session manager NOT lock held on entry. 
-func (as *mqttAccountSessionManager) addRetainedSubjectsForSubject(list map[string]struct{}, topSubject string) bool { +func (as *mqttAccountSessionManager) addRetainedSubjectsForSubject(list map[string]uint64, topSubject string) { as.mu.RLock() if len(as.retmsgs) == 0 { as.mu.RUnlock() - return false + return } result := as.sl.ReverseMatch(topSubject) as.mu.RUnlock() - added := false for _, sub := range result.psubs { - subject := string(sub.subject) - if _, ok := list[subject]; ok { + if _, ok := list[string(sub.subject)]; ok { continue } - list[subject] = struct{}{} - added = true + var seq uint64 + as.mu.RLock() + if rm, ok := as.retmsgs[string(sub.subject)]; ok { + seq = rm.sseq + } + as.mu.RUnlock() + if seq > 0 { + list[string(sub.subject)] = seq + } } - - return added } type warner interface { @@ -2797,7 +2727,7 @@ type warner interface { } // Loads a list of retained messages given a list of stored message subjects. -func (as *mqttAccountSessionManager) loadRetainedMessages(subjects map[string]struct{}, w warner) map[string]*mqttRetainedMsg { +func (as *mqttAccountSessionManager) loadRetainedMessages(subjects map[string]uint64, w warner) map[string]*mqttRetainedMsg { rms := make(map[string]*mqttRetainedMsg, len(subjects)) ss := []string{} for s := range subjects { @@ -2812,6 +2742,11 @@ func (as *mqttAccountSessionManager) loadRetainedMessages(subjects map[string]st return rms } + // Although we have the stream sequence for a given subject, we still use + // the load with "last for subject" because it will cover the cases where a + // new retained message has arrived since we collected the subject/seq pair. + // If we were doing a load "by seq" and the message is not found, we would + // incorrectly remove the retained message from our map. results, err := as.jsa.loadLastMsgForMulti(mqttRetainedMsgsStreamName, ss) // If an error occurred, warn, but then proceed with what we got. 
if err != nil { @@ -2821,26 +2756,48 @@ func (as *mqttAccountSessionManager) loadRetainedMessages(subjects map[string]st if result == nil { continue // skip requests that timed out } - if result.ToError() != nil { - w.Warnf("failed to load retained message for subject %q: %v", ss[i], err) + if err := result.ToError(); err != nil { + // Skip the "$MQTT.rmsgs." prefix... + subj := ss[i][len(mqttRetainedMsgsStreamSubject):] + if IsNatsErr(err, JSNoMessageFoundErr) { + // If there is no message for that subject, delete from our map. + // The good thing here is that we handle the race where a retained + // message may just arrive and be replacing it in the map. The + // removeRetainedMsg() function below will not remove if the sequence + // does not match. + seq := subjects[subj] + as.removeRetainedMsg(subj, seq) + } + w.Warnf("failed to load retained message for subject %q: %v", subj, err) continue } - rm, err := mqttDecodeRetainedMessage(result.Message.Header, result.Message.Data) + rm, err := mqttDecodeRetainedMessage(result.Message.Subject, result.Message.Header, result.Message.Data) if err != nil { - w.Warnf("failed to decode retained message for subject %q: %v", ss[i], err) + // Unlikely that we can recover from that, so remove the message. + // (see comment above if failing to load the message). + subj := ss[i][len(mqttRetainedMsgsStreamSubject):] + seq := subjects[subj] + as.removeRetainedMsg(subj, seq) + w.Warnf("failed to decode retained message for subject %q: %v", subj, err) continue } // Add the loaded retained message to the cache, and to the results map. - key := ss[i][len(mqttRetainedMsgsStreamSubject):] - as.setCachedRetainedMsg(key, rm, false, false) - rms[key] = rm + // We don't need setCachedRetainedMsg() to clone the `rm.Msg` bytes slice + // since we own it. + as.setCachedRetainedMsg(rm.Subject, rm, false, false) + rms[rm.Subject] = rm } return rms } // Composes a NATS message for a storeable mqttRetainedMsg. 
+// If the body is empty, the flags are encoded in a way that will cause older +// servers to fail to decode the message in processRetainedMsg callback and +// will simply ignore it, which is what we want. func mqttEncodeRetainedMessage(rm *mqttRetainedMsg) (natsMsg []byte, headerLen int) { + delRM := len(rm.Msg) == 0 + // No need to encode the subject, we can restore it from topic. l := len(hdrLine) l += len(mqttNatsRetainedMessageTopic) + 1 + len(rm.Topic) + 2 // 1 byte for ':', 2 bytes for CRLF @@ -2852,7 +2809,11 @@ func mqttEncodeRetainedMessage(rm *mqttRetainedMsg) (natsMsg []byte, headerLen i } l += len(mqttNatsRetainedMessageFlags) + 1 + 2 + 2 // 1 byte for ':', 2 bytes for the flags, 2 bytes for CRLF l += 2 // 2 bytes for the extra CRLF after the header - l += len(rm.Msg) + if delRM { + l++ // Will add the delete marker before the flag + } else { + l += len(rm.Msg) + } buf := bytes.NewBuffer(make([]byte, 0, l)) @@ -2865,6 +2826,9 @@ func mqttEncodeRetainedMessage(rm *mqttRetainedMsg) (natsMsg []byte, headerLen i buf.WriteString(mqttNatsRetainedMessageFlags) buf.WriteByte(':') + if delRM { + buf.WriteByte(mqttRetainedFlagDelMarker) + } buf.WriteString(strconv.FormatUint(uint64(rm.Flags), 16)) buf.WriteString(_CRLF_) @@ -2888,30 +2852,111 @@ func mqttEncodeRetainedMessage(rm *mqttRetainedMsg) (natsMsg []byte, headerLen i return buf.Bytes(), headerLen } -func mqttDecodeRetainedMessage(h, m []byte) (*mqttRetainedMsg, error) { - fHeader := getHeader(mqttNatsRetainedMessageFlags, h) - if len(fHeader) > 0 { - flags, err := strconv.ParseUint(string(fHeader), 16, 8) - if err != nil { - return nil, fmt.Errorf("invalid retained message flags: %v", err) +func mqttSliceHeaders(headers map[string][]byte, hdr []byte) { + // Skip the hdrLine + if !bytes.HasPrefix(hdr, stringToBytes(hdrLine)) { + return + } + crLFAsBytes := stringToBytes(CR_LF) + for i := len(hdrLine); i < len(hdr); { + // Search for key/val delimiter. 
+ del := bytes.IndexByte(hdr[i:], ':') + // Not found or key is length 0, we stop. + if del < 0 || del == i { + break + } + keyStart := i + // Walk back to remove spaces between the key and ':' if applicable. + index := keyStart + del - 1 + for index > keyStart && hdr[index] == ' ' { + index-- + } + key := hdr[keyStart : index+1] + // If what we had is only spaces, we stop. + if len(key) == 0 { + break + } + i += del + 1 + valStart := i + // Search for `\r\n`. + nl := bytes.Index(hdr[valStart:], crLFAsBytes) + // If we don't find, we stop. + if nl < 0 { + break + } + // Look if the caller is interested in this key. + if _, ok := headers[bytesToString(key)]; ok { + index := valStart + // Remove possible spaces between the ':' and the value. + for index < valStart+nl && hdr[index] == ' ' { + index++ + } + // Create a slice and limit capacity to the value range. + val := hdr[index : valStart+nl : valStart+nl] + // Record in the caller's map the value for this key. + headers[bytesToString(key)] = val + } + // Reposition to past the `\r\n`. + i += nl + 2 + } +} + +// Decodes a retained message based on the content of the header `h`. +// The returned `*mqttRetainedMsg` object will hold a reference to `m`. +// If the buffer `m` is not owned by the caller, it is the caller +// responsibility to make a copy of the byte slice. +func mqttDecodeRetainedMessage(subject string, h, m []byte) (*mqttRetainedMsg, error) { + headers := map[string][]byte{ + mqttNatsRetainedMessageOrigin: nil, + mqttNatsRetainedMessageFlags: nil, + mqttNatsRetainedMessageSource: nil, + } + var rm *mqttRetainedMsg + // Retrieve the values for the above headers. + mqttSliceHeaders(headers, h) + // Get the flag header. + fHeader := headers[mqttNatsRetainedMessageFlags] + // If we don't, it could be that this is an old retained message that + // was JSON encoded. 
+ if len(fHeader) > 0 { + if len(fHeader) > 1 && fHeader[0] == mqttRetainedFlagDelMarker { + fHeader = fHeader[1:] + } + flagsUint, err := strconv.ParseUint(bytesToString(fHeader), 16, 8) + if err != nil { + // Since the error is currently not reported in the server, we + // will simply replace with this one. + return nil, errMQTTInvalidRetainFlags + } + rm = &mqttRetainedMsg{ + Flags: byte(flagsUint), + Origin: string(headers[mqttNatsRetainedMessageOrigin]), + Source: string(headers[mqttNatsRetainedMessageSource]), + Msg: m, } - topic := getHeader(mqttNatsRetainedMessageTopic, h) - subj, _ := mqttToNATSSubjectConversion(topic, false) - return &mqttRetainedMsg{ - Flags: byte(flags), - Subject: string(subj), - Topic: string(topic), - Origin: string(getHeader(mqttNatsRetainedMessageOrigin, h)), - Source: string(getHeader(mqttNatsRetainedMessageSource, h)), - Msg: m, - }, nil } else { - var rm mqttRetainedMsg if err := json.Unmarshal(m, &rm); err != nil { return nil, err } - return &rm, nil } + // Now check that the values are correct. + // + // For "Flags", anything at or above binary (1111) is too big. + if rm.Flags >= mqttPacketFlagMask { + return nil, errMQTTInvalidRetainFlags + } + if qos := mqttGetQoS(rm.Flags); qos > 2 { + return nil, errMQTTInvalidRetainFlags + } + // We store `Topic` in the retained message because we used to store + // all retained messages under the same subject `$MQTT_rmsgs` in + // the retained messages stream. That is no longer the case, and to + // cover setups where the retained message stream is sourced from another + // account and has some subject transforms, simply reconstruct the + // topic/subject based on the `subject` passed to this function. 
+ rm.Subject = strings.TrimPrefix(subject, mqttRetainedMsgsStreamSubject) + rm.Topic = bytesToString(natsSubjectStrToMQTTTopic(rm.Subject)) + return rm, nil } // Creates the session stream (limit msgs of 1) for this client ID if it does @@ -2969,6 +3014,7 @@ func (as *mqttAccountSessionManager) deleteRetainedMsg(seq uint64) { // Sends a message indicating that a retained message on a given subject and stream sequence // is being removed. +// NOTE: This is maintained for backward compatibility reasons. Should be removed in 2.14/2.15? func (as *mqttAccountSessionManager) notifyRetainedMsgDeleted(subject string, seq uint64) { req := mqttRetMsgDel{ Subject: subject, @@ -3098,9 +3144,6 @@ func (as *mqttAccountSessionManager) transferRetainedToPerKeySubjectStream(log * } func (as *mqttAccountSessionManager) getCachedRetainedMsg(subject string) *mqttRetainedMsg { - if as.rmsCache == nil { - return nil - } v, ok := as.rmsCache.Load(subject) if !ok { return nil @@ -3113,8 +3156,18 @@ func (as *mqttAccountSessionManager) getCachedRetainedMsg(subject string) *mqttR return rm } -func (as *mqttAccountSessionManager) setCachedRetainedMsg(subject string, rm *mqttRetainedMsg, onlyReplace bool, copyBytesToCache bool) { - if as.rmsCache == nil || rm == nil { +// If cache is enabled, the expiration for the `rm` is bumped by +// `mqttRetainedCacheTTL` seconds. +// If `onlyReplace` is true, then the `rm` object is stored in the cache using +// the `subject` key only if there was already an object stored under that key. +// If `copyMsgBytes` is true, then the `rm.Msg` bytes are copied (because it +// references some buffer that is not owned by the caller). +// +// Note: currently `onlyReplace` and `cloneMsgBytes` always have the same +// value (all `true` or all `false`) however we use different booleans to +// better express the intent. 
+func (as *mqttAccountSessionManager) setCachedRetainedMsg(subject string, rm *mqttRetainedMsg, onlyReplace, copyMsgBytes bool) { + if rm == nil { return } rm.expiresFromCache = time.Now().Add(mqttRetainedCacheTTL) @@ -3123,7 +3176,7 @@ func (as *mqttAccountSessionManager) setCachedRetainedMsg(subject string, rm *mq return } } - if copyBytesToCache { + if copyMsgBytes { rm.Msg = copyBytes(rm.Msg) } as.rmsCache.Store(subject, rm) @@ -3865,6 +3918,8 @@ CHECK: ec := es.c es.c = c es.clean = cleanSess + // Clear this flag so we resubscribe to PUBREL subject is needed. + es.pubRelSubscribed = false es.mu.Unlock() if ec != nil { // Remove "will" of existing client before closing @@ -4050,16 +4105,12 @@ func mqttPubTrace(pp *mqttPublish) string { pp.topic, dup, qos, retain, pp.sz, piStr) } -// Composes a NATS message from a MQTT PUBLISH packet. The message includes an -// internal header containint the original packet's QoS, and for QoS2 packets -// the original subject. -// -// Example (QoS2, subject: "foo.bar"): -// -// NATS/1.0\r\n -// Nmqtt-Pub:2foo.bar\r\n -// \r\n -func mqttNewDeliverableMessage(pp *mqttPublish, encodePP bool) (natsMsg []byte, headerLen int) { +// mqttComputeNatsMsgSize computes the size the NATS message to be delivered +// based on a MQTT PUBLISH packet. +// encodePP: whether to encode complete MQTT PUBLISH packet header information +// - false: initial delivery (QoS 0/1) needs only base header +// - true: QoS2 storage needs to encode Nmqtt-Subject and Nmqtt-Mapped +func mqttComputeNatsMsgSize(pp *mqttPublish, encodePP bool) int { size := len(hdrLine) + len(mqttNatsHeader) + 2 + 2 + // 2 for ':', and 2 for CRLF 2 + // end-of-header CRLF @@ -4073,6 +4124,21 @@ func mqttNewDeliverableMessage(pp *mqttPublish, encodePP bool) (natsMsg []byte, len(pp.mapped) + 2 // 2 for CRLF } } + return size +} + +// Composes a NATS message from a MQTT PUBLISH packet. 
The message includes an +// internal header containint the original packet's QoS, and for QoS2 packets +// the original subject. +// +// Example (QoS2, subject: "foo.bar"): +// +// NATS/1.0\r\n +// Nmqtt-Pub:2foo.bar\r\n +// \r\n +func mqttNewDeliverableMessage(pp *mqttPublish, encodePP bool) (natsMsg []byte, headerLen int) { + size := mqttComputeNatsMsgSize(pp, encodePP) + buf := bytes.NewBuffer(make([]byte, 0, size)) qos := mqttGetQoS(pp.flags) @@ -4135,6 +4201,15 @@ func mqttNewDeliverablePubRel(pi uint16) (natsMsg []byte, headerLen int) { func (s *Server) mqttProcessPub(c *client, pp *mqttPublish, trace bool) error { qos := mqttGetQoS(pp.flags) + // Enforce max_payload using existing client max payload logic (mpay) by + // checking the total NATS message size that would be processed. + if maxPayload := atomic.LoadInt32(&c.mpay); maxPayload != jwt.NoLimit { + if total := mqttComputeNatsMsgSize(pp, qos == 2); total > int(maxPayload) { + c.maxPayloadViolation(total, maxPayload) + return ErrMaxPayload + } + } + switch qos { case 0: return s.mqttInitiateMsgDelivery(c, pp) @@ -4317,13 +4392,14 @@ func (c *client) mqttHandlePubRetain() { // Spec [MQTT-3.3.1-11]. Payload of size 0 removes the retained message, but // should still be delivered as a normal message. - if pp.sz == 0 { - if seqToRemove := asm.handleRetainedMsgDel(key, 0); seqToRemove > 0 { - asm.deleteRetainedMsg(seqToRemove) - asm.notifyRetainedMsgDeleted(key, seqToRemove) - } - return - } + // + // We used to delete the message here from our map, the stream, and notify + // the network about the delete. We no longer do that. Instead, we store + // the message with an empty body. When servers will get the empty body + // in processRetainedMsg, then will remove the message from their map. 
This + // effectively serializes all add/remove of retained messages without the + // need for "network" notifications about deletes (we still support that + // for backward compatibility but will be pulled in future releases). rm := &mqttRetainedMsg{ Origin: asm.jsa.id, @@ -4356,11 +4432,13 @@ func (c *client) mqttHandlePubRetain() { // Store the retained message with the RETAIN flag set. rm.Flags |= mqttPubFlagRetain - // Copy the payload out of pp since we will be sending the message - // asynchronously. - msg := make([]byte, pp.sz) - copy(msg, pp.msg[:pp.sz]) - asm.jsa.sendMsg(key, msg) + if pp.sz > 0 { + // Copy the payload out of pp since we will be sending the message + // asynchronously. + msg := make([]byte, pp.sz) + copy(msg, pp.msg[:pp.sz]) + asm.jsa.sendMsg(key, msg) + } } else { // isRetained // Spec [MQTT-3.3.1-5]. Store the retained message with its QoS. @@ -4374,16 +4452,8 @@ func (c *client) mqttHandlePubRetain() { // $sparkplug subject for sparkB. rm.Subject = key rmBytes, hdr := mqttEncodeRetainedMessage(rm) // will copy the payload bytes - smr, err := asm.jsa.storeMsg(mqttRetainedMsgsStreamSubject+key, hdr, rmBytes) - if err == nil { - // Update the new sequence. - rf := &mqttRetainedMsgRef{ - sseq: smr.Sequence, - } - // Add/update the map. `true` to copy the payload bytes if needs to - // update rmsCache. 
- asm.handleRetainedMsg(key, rf, rm, true) - } else { + _, err := asm.jsa.storeMsg(mqttRetainedMsgsStreamSubject+key, hdr, rmBytes) + if err != nil { c.mu.Lock() acc := c.acc c.mu.Unlock() @@ -4443,21 +4513,23 @@ func (s *Server) mqttCheckPubRetainedPerms() { rmsg: rf, }) } + jsaID := asm.jsa.id asm.mu.RUnlock() slices.SortFunc(rms, func(i, j retainedMsg) int { return cmp.Compare(i.rmsg.sseq, j.rmsg.sseq) }) perms := map[string]*perm{} - deletes := map[string]uint64{} for _, rf := range rms { jsm, err := asm.jsa.loadMsg(mqttRetainedMsgsStreamName, rf.rmsg.sseq) if err != nil || jsm == nil { continue } - rm, err := mqttDecodeRetainedMessage(jsm.Header, jsm.Data) + rm, err := mqttDecodeRetainedMessage(jsm.Subject, jsm.Header, jsm.Data) if err != nil { continue } - if rm.Source == _EMPTY_ { + // We deal only with messages that have a source (the username that produced + // this message) and were produced on this server. + if rm.Source == _EMPTY_ || rm.Origin != jsaID { continue } // Lookup source from global users. @@ -4466,7 +4538,7 @@ func (s *Server) mqttCheckPubRetainedPerms() { p, ok := perms[rm.Source] if !ok { p = generatePubPerms(u.Permissions) - perms[rm.Source] = p + perms[rm.Source] = p // possibly nil } // If there is permission and no longer allowed to publish in // the subject, remove the publish retained message from the map. @@ -4476,25 +4548,27 @@ func (s *Server) mqttCheckPubRetainedPerms() { } // Not present or permissions have changed such that the source can't - // publish on that subject anymore: remove it from the map. + // publish on that subject anymore: delete this retained message. if u == nil { - asm.mu.Lock() - delete(asm.retmsgs, rf.subj) - asm.sl.Remove(rf.rmsg.sub) - asm.mu.Unlock() - deletes[rf.subj] = rf.rmsg.sseq + // Set the payload to empty to notify that we are deleting this + // retained message. We will send this message async. 
+ rm.Msg = nil + rmBytes, hdrLen := mqttEncodeRetainedMessage(rm) + asm.jsa.storeMsgNoWait(mqttRetainedMsgsStreamSubject+rm.Subject, hdrLen, rmBytes) } } - - for subject, seq := range deletes { - asm.deleteRetainedMsg(seq) - asm.notifyRetainedMsgDeleted(subject, seq) - } } } // Helper to generate only pub permissions from a Permissions object func generatePubPerms(perms *Permissions) *perm { + // If given permissions is `nil`, then it means that permissions block + // has been removed (so the user is now allowed to publish on everything) + // or was never there in the first place. Returning `nil` will let the + // caller know that there are no permissions to enforce. + if perms == nil { + return nil + } var p *perm if perms.Publish.Allow != nil { p = &perm{} @@ -4822,8 +4896,9 @@ func mqttDeliverMsgCbQoS0(sub *subscription, pc *client, _ *Account, subject, re return } topic = pc.mqtt.pp.topic - // Check for service imports where subject mapping is in play. - if len(pc.pa.mapped) > 0 && len(pc.pa.psi) > 0 { + // If the subject is different than the one in pp.subject, then some + // mapping/transform occurred and we need to recreate the topic. + if subject != bytesToString(pc.mqtt.pp.subject) { topic = natsSubjectStrToMQTTTopic(subject) } @@ -5161,6 +5236,31 @@ func (sess *mqttSession) cleanupFailedSub(c *client, sub *subscription, cc *Cons // Make sure we are set up to deliver PUBREL messages to this QoS2-subscribed // session. func (sess *mqttSession) ensurePubRelConsumerSubscription(c *client) error { + + sess.mu.Lock() + pubRelSubscribed := sess.pubRelSubscribed + pubRelDeliverySubjectB := sess.pubRelDeliverySubjectB + pubRelDeliverySubject := sess.pubRelDeliverySubject + pubRelConsumer := sess.pubRelConsumer + sess.mu.Unlock() + + // Subscribe before the consumer is created so we don't loose any messages. 
+ if !pubRelSubscribed { + _, err := c.processSub(pubRelDeliverySubjectB, nil, pubRelDeliverySubjectB, mqttDeliverPubRelCb, false) + if err != nil { + c.Errorf("Unable to create subscription for JetStream consumer on %q: %v", pubRelDeliverySubject, err) + return err + } + sess.mu.Lock() + sess.pubRelSubscribed = true + sess.mu.Unlock() + } + + // If the JS consumer already exists, we are done. + if pubRelConsumer != nil { + return nil + } + opts := c.srv.getOpts() ackWait := opts.MQTT.AckWait if ackWait == 0 { @@ -5172,61 +5272,42 @@ func (sess *mqttSession) ensurePubRelConsumerSubscription(c *client) error { } sess.mu.Lock() - pubRelSubscribed := sess.pubRelSubscribed pubRelSubject := sess.pubRelSubject - pubRelDeliverySubjectB := sess.pubRelDeliverySubjectB - pubRelDeliverySubject := sess.pubRelDeliverySubject - pubRelConsumer := sess.pubRelConsumer tmaxack := sess.tmaxack idHash := sess.idHash id := sess.id sess.mu.Unlock() - // Subscribe before the consumer is created so we don't loose any messages. - if !pubRelSubscribed { - _, err := c.processSub(pubRelDeliverySubjectB, nil, pubRelDeliverySubjectB, - mqttDeliverPubRelCb, false) - if err != nil { - c.Errorf("Unable to create subscription for JetStream consumer on %q: %v", pubRelDeliverySubject, err) - return err - } - pubRelSubscribed = true + // Check that the limit of subs' maxAckPending are not going over the limit + if after := tmaxack + maxAckPending; after > mqttMaxAckTotalLimit { + return fmt.Errorf("max_ack_pending for all consumers would be %v which exceeds the limit of %v", + after, mqttMaxAckTotalLimit) } - // Create the consumer if needed. 
- if pubRelConsumer == nil { - // Check that the limit of subs' maxAckPending are not going over the limit - if after := tmaxack + maxAckPending; after > mqttMaxAckTotalLimit { - return fmt.Errorf("max_ack_pending for all consumers would be %v which exceeds the limit of %v", - after, mqttMaxAckTotalLimit) - } - - ccr := &CreateConsumerRequest{ - Stream: mqttOutStreamName, - Config: ConsumerConfig{ - DeliverSubject: pubRelDeliverySubject, - Durable: mqttPubRelConsumerDurablePrefix + idHash, - AckPolicy: AckExplicit, - DeliverPolicy: DeliverNew, - FilterSubject: pubRelSubject, - AckWait: ackWait, - MaxAckPending: maxAckPending, - MemoryStorage: opts.MQTT.ConsumerMemoryStorage, - }, - } - if opts.MQTT.ConsumerInactiveThreshold > 0 { - ccr.Config.InactiveThreshold = opts.MQTT.ConsumerInactiveThreshold - } - if _, err := sess.jsa.createDurableConsumer(ccr); err != nil { - c.Errorf("Unable to add JetStream consumer for PUBREL for client %q: err=%v", id, err) - return err - } - pubRelConsumer = &ccr.Config - tmaxack += maxAckPending + ccr := &CreateConsumerRequest{ + Stream: mqttOutStreamName, + Config: ConsumerConfig{ + DeliverSubject: pubRelDeliverySubject, + Durable: mqttPubRelConsumerDurablePrefix + idHash, + AckPolicy: AckExplicit, + DeliverPolicy: DeliverNew, + FilterSubject: pubRelSubject, + AckWait: ackWait, + MaxAckPending: maxAckPending, + MemoryStorage: opts.MQTT.ConsumerMemoryStorage, + }, } + if opts.MQTT.ConsumerInactiveThreshold > 0 { + ccr.Config.InactiveThreshold = opts.MQTT.ConsumerInactiveThreshold + } + if _, err := sess.jsa.createDurableConsumer(ccr); err != nil { + c.Errorf("Unable to add JetStream consumer for PUBREL for client %q: err=%v", id, err) + return err + } + pubRelConsumer = &ccr.Config + tmaxack += maxAckPending sess.mu.Lock() - sess.pubRelSubscribed = pubRelSubscribed sess.pubRelConsumer = pubRelConsumer sess.tmaxack = tmaxack sess.mu.Unlock() diff --git a/vendor/github.com/nats-io/nats-server/v2/server/opts.go 
b/vendor/github.com/nats-io/nats-server/v2/server/opts.go index f7aed1081e5..b77663de1fb 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/opts.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/opts.go @@ -62,29 +62,30 @@ type PinnedCertSet map[string]struct{} // NOTE: This structure is no longer used for monitoring endpoints // and json tags are deprecated and may be removed in the future. type ClusterOpts struct { - Name string `json:"-"` - Host string `json:"addr,omitempty"` - Port int `json:"cluster_port,omitempty"` - Username string `json:"-"` - Password string `json:"-"` - AuthTimeout float64 `json:"auth_timeout,omitempty"` - Permissions *RoutePermissions `json:"-"` - TLSTimeout float64 `json:"-"` - TLSConfig *tls.Config `json:"-"` - TLSMap bool `json:"-"` - TLSCheckKnownURLs bool `json:"-"` - TLSPinnedCerts PinnedCertSet `json:"-"` - ListenStr string `json:"-"` - Advertise string `json:"-"` - NoAdvertise bool `json:"-"` - ConnectRetries int `json:"-"` - ConnectBackoff bool `json:"-"` - PoolSize int `json:"-"` - PinnedAccounts []string `json:"-"` - Compression CompressionOpts `json:"-"` - PingInterval time.Duration `json:"-"` - MaxPingsOut int `json:"-"` - WriteDeadline time.Duration `json:"-"` + Name string `json:"-"` + Host string `json:"addr,omitempty"` + Port int `json:"cluster_port,omitempty"` + Username string `json:"-"` + Password string `json:"-"` + AuthTimeout float64 `json:"auth_timeout,omitempty"` + Permissions *RoutePermissions `json:"-"` + TLSTimeout float64 `json:"-"` + TLSConfig *tls.Config `json:"-"` + TLSMap bool `json:"-"` + TLSCheckKnownURLs bool `json:"-"` + TLSPinnedCerts PinnedCertSet `json:"-"` + ListenStr string `json:"-"` + Advertise string `json:"-"` + NoAdvertise bool `json:"-"` + ConnectRetries int `json:"-"` + ConnectBackoff bool `json:"-"` + PoolSize int `json:"-"` + PinnedAccounts []string `json:"-"` + Compression CompressionOpts `json:"-"` + PingInterval time.Duration `json:"-"` + MaxPingsOut int `json:"-"` + 
WriteDeadline time.Duration `json:"-"` + WriteTimeout WriteTimeoutPolicy `json:"-"` // Not exported (used in tests) resolver netResolver @@ -128,6 +129,7 @@ type GatewayOpts struct { Gateways []*RemoteGatewayOpts `json:"gateways,omitempty"` RejectUnknown bool `json:"reject_unknown,omitempty"` // config got renamed to reject_unknown_cluster WriteDeadline time.Duration `json:"-"` + WriteTimeout WriteTimeoutPolicy `json:"-"` // Not exported, for tests. resolver netResolver @@ -174,11 +176,12 @@ type LeafNodeOpts struct { // to start before falling back to previous behavior of sending the // INFO protocol first. It allows for a mix of newer remote leafnodes // that can require a TLS handshake first, and older that can't. - TLSHandshakeFirstFallback time.Duration `json:"-"` - Advertise string `json:"-"` - NoAdvertise bool `json:"-"` - ReconnectInterval time.Duration `json:"-"` - WriteDeadline time.Duration `json:"-"` + TLSHandshakeFirstFallback time.Duration `json:"-"` + Advertise string `json:"-"` + NoAdvertise bool `json:"-"` + ReconnectInterval time.Duration `json:"-"` + WriteDeadline time.Duration `json:"-"` + WriteTimeout WriteTimeoutPolicy `json:"-"` // Compression options Compression CompressionOpts `json:"-"` @@ -353,6 +356,7 @@ type Options struct { Username string `json:"-"` Password string `json:"-"` ProxyRequired bool `json:"-"` + ProxyProtocol bool `json:"-"` Authorization string `json:"-"` AuthCallout *AuthCallout `json:"-"` PingInterval time.Duration `json:"ping_interval"` @@ -383,6 +387,8 @@ type Options struct { JetStreamTpm JSTpmOpts JetStreamMaxCatchup int64 JetStreamRequestQueueLimit int64 + JetStreamMetaCompact uint64 + JetStreamMetaCompactSize uint64 StreamMaxBufferedMsgs int `json:"-"` StreamMaxBufferedSize int64 `json:"-"` StoreDir string `json:"-"` @@ -423,12 +429,13 @@ type Options struct { // to start before falling back to previous behavior of sending the // INFO protocol first. 
It allows for a mix of newer clients that can // require a TLS handshake first, and older clients that can't. - TLSHandshakeFirstFallback time.Duration `json:"-"` - AllowNonTLS bool `json:"-"` - WriteDeadline time.Duration `json:"-"` - MaxClosedClients int `json:"-"` - LameDuckDuration time.Duration `json:"-"` - LameDuckGracePeriod time.Duration `json:"-"` + TLSHandshakeFirstFallback time.Duration `json:"-"` + AllowNonTLS bool `json:"-"` + WriteDeadline time.Duration `json:"-"` + WriteTimeout WriteTimeoutPolicy `json:"-"` + MaxClosedClients int `json:"-"` + LameDuckDuration time.Duration `json:"-"` + LameDuckGracePeriod time.Duration `json:"-"` // MaxTracedMsgLen is the maximum printable length for traced messages. MaxTracedMsgLen int `json:"-"` @@ -588,6 +595,11 @@ type WebsocketOpts struct { // time needed for the TLS Handshake. HandshakeTimeout time.Duration + // How often to send pings to WebSocket clients. When set to a non-zero + // duration, this overrides the default PingInterval for WebSocket connections. + // If not set or zero, the server's default PingInterval will be used. + PingInterval time.Duration + // Headers to be added to the upgrade response. // Useful for adding custom headers like Strict-Transport-Security. 
Headers map[string]string @@ -1253,6 +1265,8 @@ func (o *Options) processConfigFileLine(k string, v any, errors *[]error, warnin o.MaxPayload = int32(v.(int64)) case "max_pending": o.MaxPending = v.(int64) + case "proxy_protocol": + o.ProxyProtocol = v.(bool) case "max_connections", "max_conn": o.MaxConn = int(v.(int64)) case "max_traced_msg_len": @@ -1347,6 +1361,8 @@ func (o *Options) processConfigFileLine(k string, v any, errors *[]error, warnin o.AllowNonTLS = v.(bool) case "write_deadline": o.WriteDeadline = parseDuration("write_deadline", tk, v, errors, warnings) + case "write_timeout": + o.WriteTimeout = parseWriteDeadlinePolicy(tk, v.(string), errors) case "lame_duck_duration": dur, err := time.ParseDuration(v.(string)) if err != nil { @@ -1674,7 +1690,7 @@ func (o *Options) processConfigFileLine(k string, v any, errors *[]error, warnin case "reconnect_error_reports": o.ReconnectErrorReports = int(v.(int64)) case "websocket", "ws": - if err := parseWebsocket(tk, o, errors); err != nil { + if err := parseWebsocket(tk, o, errors, warnings); err != nil { *errors = append(*errors, err) return } @@ -1828,6 +1844,21 @@ func parseDuration(field string, tk token, v any, errors *[]error, warnings *[]e } } +func parseWriteDeadlinePolicy(tk token, v string, errors *[]error) WriteTimeoutPolicy { + switch v { + case "default": + return WriteTimeoutPolicyDefault + case "close": + return WriteTimeoutPolicyClose + case "retry": + return WriteTimeoutPolicyRetry + default: + err := &configErr{tk, "write_timeout must be 'default', 'close' or 'retry'"} + *errors = append(*errors, err) + return WriteTimeoutPolicyDefault + } +} + func trackExplicitVal(pm *map[string]bool, name string, val bool) { m := *pm if m == nil { @@ -2004,6 +2035,8 @@ func parseCluster(v any, opts *Options, errors *[]error, warnings *[]error) erro opts.Cluster.MaxPingsOut = int(mv.(int64)) case "write_deadline": opts.Cluster.WriteDeadline = parseDuration("write_deadline", tk, mv, errors, warnings) + case 
"write_timeout": + opts.Cluster.WriteTimeout = parseWriteDeadlinePolicy(tk, mv.(string), errors) default: if !tk.IsUsedVariable() { err := &unknownConfigFieldErr{ @@ -2194,6 +2227,8 @@ func parseGateway(v any, o *Options, errors *[]error, warnings *[]error) error { o.Gateway.RejectUnknown = mv.(bool) case "write_deadline": o.Gateway.WriteDeadline = parseDuration("write_deadline", tk, mv, errors, warnings) + case "write_timeout": + o.Gateway.WriteTimeout = parseWriteDeadlinePolicy(tk, mv.(string), errors) default: if !tk.IsUsedVariable() { err := &unknownConfigFieldErr{ @@ -2603,6 +2638,21 @@ func parseJetStream(v any, opts *Options, errors *[]error, warnings *[]error) er return &configErr{tk, fmt.Sprintf("Expected a parseable size for %q, got %v", mk, mv)} } opts.JetStreamRequestQueueLimit = lim + case "meta_compact": + thres, ok := mv.(int64) + if !ok || thres < 0 { + return &configErr{tk, fmt.Sprintf("Expected an absolute size for %q, got %v", mk, mv)} + } + opts.JetStreamMetaCompact = uint64(thres) + case "meta_compact_size": + s, err := getStorageSize(mv) + if err != nil { + return &configErr{tk, fmt.Sprintf("%s %s", strings.ToLower(mk), err)} + } + if s < 0 { + return &configErr{tk, fmt.Sprintf("Expected an absolute size for %q, got %v", mk, mv)} + } + opts.JetStreamMetaCompactSize = uint64(s) default: if !tk.IsUsedVariable() { err := &unknownConfigFieldErr{ @@ -2719,6 +2769,8 @@ func parseLeafNodes(v any, opts *Options, errors *[]error, warnings *[]error) er opts.LeafNode.IsolateLeafnodeInterest = mv.(bool) case "write_deadline": opts.LeafNode.WriteDeadline = parseDuration("write_deadline", tk, mv, errors, warnings) + case "write_timeout": + opts.LeafNode.WriteTimeout = parseWriteDeadlinePolicy(tk, mv.(string), errors) default: if !tk.IsUsedVariable() { err := &unknownConfigFieldErr{ @@ -2889,6 +2941,7 @@ func parseRemoteLeafNodes(v any, errors *[]error, warnings *[]error) ([]*RemoteL continue } remote := &RemoteLeafOpts{} + var proxyToken token for k, v := 
range rm { tk, v = unwrapValue(v, <) switch strings.ToLower(k) { @@ -3022,7 +3075,7 @@ func parseRemoteLeafNodes(v any, errors *[]error, warnings *[]error) ([]*RemoteL continue } // Capture the token for the "proxy" field itself, before the map iteration - proxyToken := tk + proxyToken = tk for pk, pv := range proxyMap { tk, pv = unwrapValue(pv, <) switch strings.ToLower(pk) { @@ -3047,16 +3100,6 @@ func parseRemoteLeafNodes(v any, errors *[]error, warnings *[]error) ([]*RemoteL } } } - // Use the saved proxy token for validation errors, not the last field token - if warns, err := validateLeafNodeProxyOptions(remote); err != nil { - *errors = append(*errors, &configErr{proxyToken, err.Error()}) - continue - } else { - // Add any warnings about proxy configuration - for _, warn := range warns { - *warnings = append(*warnings, &configErr{proxyToken, warn}) - } - } default: if !tk.IsUsedVariable() { err := &unknownConfigFieldErr{ @@ -3070,6 +3113,16 @@ func parseRemoteLeafNodes(v any, errors *[]error, warnings *[]error) ([]*RemoteL } } } + // Use the saved proxy token for validation errors, not the last field token + if warns, err := validateLeafNodeProxyOptions(remote); err != nil { + *errors = append(*errors, &configErr{proxyToken, err.Error()}) + continue + } else { + // Add any warnings about proxy configuration + for _, warn := range warns { + *warnings = append(*warnings, &configErr{proxyToken, warn}) + } + } remotes = append(remotes, remote) } return remotes, nil @@ -5265,7 +5318,7 @@ func parseStringArray(fieldName string, tk token, lt *token, mv any, errors *[]e } } -func parseWebsocket(v any, o *Options, errors *[]error) error { +func parseWebsocket(v any, o *Options, errors *[]error, warnings *[]error) error { var lt token defer convertPanicToErrorList(<, errors) @@ -5366,6 +5419,8 @@ func parseWebsocket(v any, o *Options, errors *[]error) error { o.Websocket.Headers[key] = headerValue } } + case "ping_interval": + o.Websocket.PingInterval = 
parseDuration("ping_interval", tk, mv, errors, warnings) default: if !tk.IsUsedVariable() { err := &unknownConfigFieldErr{ diff --git a/vendor/github.com/nats-io/nats-server/v2/server/raft.go b/vendor/github.com/nats-io/nats-server/v2/server/raft.go index 7ea1ee54c85..8d845143de4 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/raft.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/raft.go @@ -19,7 +19,6 @@ import ( "encoding/binary" "errors" "fmt" - "hash" "math" "math/rand" "net" @@ -70,6 +69,7 @@ type RaftNode interface { UpdateKnownPeers(knownPeers []string) ProposeAddPeer(peer string) error ProposeRemovePeer(peer string) error + MembershipChangeInProgress() bool AdjustClusterSize(csz int) error AdjustBootClusterSize(csz int) error ClusterSize() int @@ -153,7 +153,7 @@ type raft struct { state atomic.Int32 // RaftState leaderState atomic.Bool // Is in (complete) leader state. leaderSince atomic.Pointer[time.Time] // How long since becoming leader. - hh hash.Hash64 // Highwayhash, used for snapshots + hh *highwayhash.Digest64 // Highwayhash, used for snapshots snapfile string // Snapshot filename csz int // Cluster size @@ -230,6 +230,7 @@ type raft struct { observer bool // The node is observing, i.e. not able to become leader initializing bool // The node is new, and "empty log" checks can be temporarily relaxed. scaleUp bool // The node is part of a scale up, puts us in observer mode until the log contains data. 
+ membChanging bool // There is a membership change proposal in progress } type proposedEntry struct { @@ -305,6 +306,7 @@ var ( errEntryLoadFailed = errors.New("raft: could not load entry from WAL") errEntryStoreFailed = errors.New("raft: could not store entry to WAL") errNodeClosed = errors.New("raft: node is closed") + errNodeRemoved = errors.New("raft: peer was removed") errBadSnapName = errors.New("raft: snapshot name could not be parsed") errNoSnapAvailable = errors.New("raft: no snapshot available") errCatchupsRunning = errors.New("raft: snapshot can not be installed while catchups running") @@ -316,6 +318,8 @@ var ( errTooManyEntries = errors.New("raft: append entry can contain a max of 64k entries") errBadAppendEntry = errors.New("raft: append entry corrupt") errNoInternalClient = errors.New("raft: no internal client") + errMembershipChange = errors.New("raft: membership change in progress") + errRemoveLastNode = errors.New("raft: cannot remove the last peer") ) // This will bootstrap a raftNode by writing its config into the store directory. @@ -447,7 +451,7 @@ func (s *Server) initRaftNode(accName string, cfg *RaftConfig, labels pprofLabel // Set up the highwayhash for the snapshots. key := sha256.Sum256([]byte(n.group)) - n.hh, _ = highwayhash.New64(key[:]) + n.hh, _ = highwayhash.NewDigest64(key[:]) // If we have a term and vote file (tav.idx on the filesystem) then read in // what we think the term and vote was. It's possible these are out of date @@ -920,66 +924,72 @@ func (n *raft) ForwardProposal(entry []byte) error { // ProposeAddPeer is called to add a peer to the group. func (n *raft) ProposeAddPeer(peer string) error { - n.RLock() + n.Lock() // Check state under lock, we might not be leader anymore. if n.State() != Leader { - n.RUnlock() + n.Unlock() return errNotLeader } // Error if we had a previous write error. 
if werr := n.werr; werr != nil { - n.RUnlock() + n.Unlock() return werr } + if n.membChanging { + n.Unlock() + return errMembershipChange + } prop := n.prop - n.RUnlock() + n.membChanging = true + n.Unlock() prop.push(newProposedEntry(newEntry(EntryAddPeer, []byte(peer)), _EMPTY_)) return nil } -// As a leader if we are proposing to remove a peer assume its already gone. -func (n *raft) doRemovePeerAsLeader(peer string) { - n.Lock() - if n.removed == nil { - n.removed = map[string]time.Time{} - } - n.removed[peer] = time.Now() - if _, ok := n.peers[peer]; ok { - delete(n.peers, peer) - // We should decrease our cluster size since we are tracking this peer and the peer is most likely already gone. - n.adjustClusterSizeAndQuorum() - } - n.Unlock() -} - // ProposeRemovePeer is called to remove a peer from the group. func (n *raft) ProposeRemovePeer(peer string) error { - n.RLock() - prop, subj := n.prop, n.rpsubj - isLeader := n.State() == Leader - werr := n.werr - n.RUnlock() + n.Lock() // Error if we had a previous write error. - if werr != nil { + if werr := n.werr; werr != nil { + n.Unlock() return werr } - // If we are the leader then we are responsible for processing the - // peer remove and then notifying the rest of the group that the - // peer was removed. - if isLeader { - prop.push(newProposedEntry(newEntry(EntryRemovePeer, []byte(peer)), _EMPTY_)) - n.doRemovePeerAsLeader(peer) + if n.State() != Leader { + subj := n.rpsubj + n.Unlock() + + // Forward the proposal to the leader + n.sendRPC(subj, _EMPTY_, []byte(peer)) return nil } - // Otherwise we need to forward the proposal to the leader. 
- n.sendRPC(subj, _EMPTY_, []byte(peer)) + if n.membChanging { + n.Unlock() + return errMembershipChange + } + + if len(n.peers) <= 1 { + n.Unlock() + return errRemoveLastNode + } + + prop := n.prop + n.membChanging = true + n.Unlock() + + prop.push(newProposedEntry(newEntry(EntryRemovePeer, []byte(peer)), _EMPTY_)) return nil } +func (n *raft) MembershipChangeInProgress() bool { + n.RLock() + defer n.RUnlock() + return n.membChanging +} + // ClusterSize reports back the total cluster size. // This effects quorum etc. func (n *raft) ClusterSize() int { @@ -1118,13 +1128,15 @@ func (n *raft) ResumeApply() { func (n *raft) DrainAndReplaySnapshot() bool { n.Lock() defer n.Unlock() - n.warn("Draining and replaying snapshot") snap, err := n.loadLastSnapshot() if err != nil { return false } + n.warn("Draining and replaying snapshot") n.pauseApplyLocked() n.apply.drain() + // Cancel after draining, we might have sent EntryCatchup and need to get them the nil entry. + n.cancelCatchup() n.commit = snap.lastIndex n.apply.push(newCommittedEntry(n.commit, []*Entry{{EntrySnapshot, snap.data}})) return true @@ -1225,7 +1237,8 @@ func (n *raft) encodeSnapshot(snap *snapshot) []byte { // Now do the hash for the end. n.hh.Reset() n.hh.Write(buf[:wi]) - checksum := n.hh.Sum(nil) + var hb [highwayhash.Size64]byte + checksum := n.hh.Sum(hb[:0]) copy(buf[wi:], checksum) wi += len(checksum) return buf[:wi] @@ -1450,7 +1463,8 @@ func (n *raft) loadLastSnapshot() (*snapshot, error) { lchk := buf[hoff:] n.hh.Reset() n.hh.Write(buf[:hoff]) - if !bytes.Equal(lchk[:], n.hh.Sum(nil)) { + var hb [highwayhash.Size64]byte + if !bytes.Equal(lchk[:], n.hh.Sum(hb[:0])) { n.warn("Snapshot corrupt, checksums did not match") os.Remove(n.snapfile) n.snapfile = _EMPTY_ @@ -2377,6 +2391,10 @@ const ( EntryRemovePeer EntryLeaderTransfer EntrySnapshot + // EntryCatchup signals an internal type used to signal a Raft-level catchup has started. 
+ // After the catchup completes (or is canceled), a nil entry will be sent to signal this. + // This type of entry is purely internal and not transmitted between peers or stored in the log. + EntryCatchup ) func (t EntryType) String() string { @@ -2404,6 +2422,15 @@ type Entry struct { Data []byte } +func (e *Entry) ChangesMembership() bool { + switch e.Type { + case EntryAddPeer, EntryRemovePeer: + return true + default: + return false + } +} + func (ae *appendEntry) String() string { return fmt.Sprintf("&{leader:%s term:%d commit:%d pterm:%d pindex:%d entries: %d}", ae.leader, ae.term, ae.commit, ae.pterm, ae.pindex, len(ae.entries)) @@ -2620,6 +2647,95 @@ func (n *raft) handleForwardedProposal(sub *subscription, c *client, _ *Account, prop.push(newProposedEntry(newEntry(EntryNormal, msg), reply)) } +// Adds peer with the given id to our membership, +// and adjusts cluster size and quorum accordingly. +// Lock should be held. +func (n *raft) addPeer(peer string) { + // If we were on the removed list reverse that here. + if n.removed != nil { + delete(n.removed, peer) + } + + if lp, ok := n.peers[peer]; !ok { + // We are not tracking this one automatically so we need + // to bump cluster size. + n.peers[peer] = &lps{time.Time{}, 0, true} + } else { + // Mark as added. + lp.kp = true + } + + // Adjust cluster size and quorum if needed. + n.adjustClusterSizeAndQuorum() + // Write out our new state. + n.writePeerState(&peerState{n.peerNames(), n.csz, n.extSt}) +} + +// Remove the peer with the given id from our membership, +// and adjusts cluster size and quorum accordingly. +// Lock should be held. 
+func (n *raft) removePeer(peer string) { + if n.removed == nil { + n.removed = map[string]time.Time{} + } + n.removed[peer] = time.Now() + if _, ok := n.peers[peer]; ok { + delete(n.peers, peer) + n.adjustClusterSizeAndQuorum() + n.writePeerState(&peerState{n.peerNames(), n.csz, n.extSt}) + } +} + +// Build and send appendEntry request for the given entry that changes +// membership (EntryAddPeer / EntryRemovePeer). +// Returns true if the entry made it to the WAL and was sent to the followers +func (n *raft) sendMembershipChange(e *Entry) bool { + n.Lock() + defer n.Unlock() + + // Only makes sense to call this with entries that change membership + if !e.ChangesMembership() { + return false + } + + err := n.sendAppendEntryLocked([]*Entry{e}, true) + if err != nil { + n.membChanging = false + return false + } + + if e.Type == EntryAddPeer { + n.addPeer(string(e.Data)) + } + + if e.Type == EntryRemovePeer { + n.removePeer(string(e.Data)) + if n.csz == 1 { + n.tryCommit(n.pindex) + return true + } + } + return true +} + +// logContainsUncommittedMembershipChange returns true if the +// log contains uncommitted entries that change membership. +// Lock should be held. +func (n *raft) logContainsUncommittedMembershipChange() (bool, error) { + for i := n.commit + 1; i <= n.pindex; i++ { + ae, err := n.loadEntry(i) + if err != nil { + return false, err + } + if len(ae.entries) > 0 && ae.entries[0].ChangesMembership() { + ae.returnToPool() + return true, nil + } + ae.returnToPool() + } + return false, nil +} + func (n *raft) runAsLeader() { if n.State() == Closed { return @@ -2628,6 +2744,22 @@ func (n *raft) runAsLeader() { n.Lock() psubj, rpsubj := n.psubj, n.rpsubj + // Check if there are any uncommitted membership changes. + // If so, we need to make sure we don't propose any new + // ones until those are committed. 
+ found, err := n.logContainsUncommittedMembershipChange() + if err != nil { + n.warn("Error while looking for membership changes in WAL: %v", err) + n.stepdownLocked(noLeader) + n.Unlock() + return + + } + if found { + n.membChanging = true + n.debug("Log contains uncommitted membership change") + } + // For forwarded proposals, both normal and remove peer proposals. fsub, err := n.subscribe(psubj, n.handleForwardedProposal) if err != nil { @@ -2652,9 +2784,6 @@ func (n *raft) runAsLeader() { n.unsubscribe(rpsub) n.Unlock() }() - - // To send out our initial peer state. - n.sendPeerState() n.Unlock() hb := time.NewTicker(hbInterval) @@ -2682,8 +2811,9 @@ func (n *raft) runAsLeader() { es, sz := n.prop.pop(), 0 for _, b := range es { - if b.Type == EntryRemovePeer { - n.doRemovePeerAsLeader(string(b.Data)) + if b.ChangesMembership() { + n.sendMembershipChange(b.Entry) + continue } entries = append(entries, b.Entry) // Increment size. @@ -3061,8 +3191,19 @@ func (n *raft) applyCommit(index uint64) error { n.commit = index ae.buf = nil - var committed []*Entry + + defer func() { + // Pass to the upper layers if we have normal entries. It is + // entirely possible that 'committed' might be an empty slice here, + // which will happen if we've processed updates inline (like peer + // states). In which case the upper layer will just call down with + // Applied() with no further action. + n.apply.push(newCommittedEntry(index, committed)) + // Place back in the pool. + ae.returnToPool() + }() + for _, e := range ae.entries { switch e.Type { case EntryNormal: @@ -3072,6 +3213,15 @@ func (n *raft) applyCommit(index uint64) error { committed = append(committed, newEntry(EntrySnapshot, e.Data)) case EntrySnapshot: committed = append(committed, e) + // If we have no snapshot, install the leader's snapshot as our own. 
+ if len(ae.entries) == 1 && n.snapfile == _EMPTY_ && ae.commit > 0 { + n.installSnapshot(&snapshot{ + lastTerm: ae.pterm, + lastIndex: ae.commit, + peerstate: encodePeerState(&peerState{n.peerNames(), n.csz, n.extSt}), + data: e.Data, + }) + } case EntryPeerState: if n.State() != Leader { if ps, err := decodePeerState(e.Data); err == nil { @@ -3085,82 +3235,78 @@ func (n *raft) applyCommit(index uint64) error { // Store our peer in our global peer map for all peers. peers.LoadOrStore(newPeer, newPeer) - // If we were on the removed list reverse that here. - if n.removed != nil { - delete(n.removed, newPeer) - } + n.addPeer(newPeer) - if lp, ok := n.peers[newPeer]; !ok { - // We are not tracking this one automatically so we need to bump cluster size. - n.peers[newPeer] = &lps{time.Time{}, 0, true} - } else { - // Mark as added. - lp.kp = true - } - // Adjust cluster size and quorum if needed. - n.adjustClusterSizeAndQuorum() - // Write out our new state. - n.writePeerState(&peerState{n.peerNames(), n.csz, n.extSt}) // We pass these up as well. committed = append(committed, e) + // We are done with this membership change + n.membChanging = false + case EntryRemovePeer: peer := string(e.Data) n.debug("Removing peer %q", peer) - // Make sure we have our removed map. - if n.removed == nil { - n.removed = make(map[string]time.Time) - } - n.removed[peer] = time.Now() - - if _, ok := n.peers[peer]; ok { - delete(n.peers, peer) - // We should decrease our cluster size since we are tracking this peer. - n.adjustClusterSizeAndQuorum() - // Write out our new state. - n.writePeerState(&peerState{n.peerNames(), n.csz, n.extSt}) - } - - // If this is us and we are the leader we should attempt to stepdown. - if peer == n.id && n.State() == Leader { - n.stepdownLocked(n.selectNextLeader()) - } + n.removePeer(peer) // Remove from string intern map. peers.Delete(peer) // We pass these up as well. 
committed = append(committed, e) + + // We are done with this membership change + n.membChanging = false + + // If this is us and we are the leader signal the caller + // to attempt to stepdown. + if peer == n.id && n.State() == Leader { + return errNodeRemoved + } } } - // Pass to the upper layers if we have normal entries. It is - // entirely possible that 'committed' might be an empty slice here, - // which will happen if we've processed updates inline (like peer - // states). In which case the upper layer will just call down with - // Applied() with no further action. - n.apply.push(newCommittedEntry(index, committed)) - // Place back in the pool. - ae.returnToPool() return nil } -// Used to track a success response and apply entries. -func (n *raft) trackResponse(ar *appendEntryResponse) { - if n.State() == Closed { - return +// Check if there is a quorum for the given index, and if +// so, commit the corresponding entry. +// Return true if the index was committed, false otherwise. +// Lock should be held. +func (n *raft) tryCommit(index uint64) (bool, error) { + acks := len(n.acks[index]) + // Count the leader if it's still part of membership + if n.peers[n.ID()] != nil { + acks += 1 } + if acks < n.qn { + return false, nil + } + // We have a quorum + for i := n.commit + 1; i <= index; i++ { + if err := n.applyCommit(i); err != nil { + if err != errNodeClosed && err != errNodeRemoved { + n.error("Got an error applying commit for %d: %v", i, err) + } + return false, err + } + } + return true, nil +} - n.Lock() - +// Used to track a success response. Returns true if the +// response was tracked, false if the response was ignored +// (the response is old, the index is already committed, ...) +// Lock should be held. +func (n *raft) trackResponse(ar *appendEntryResponse) bool { // Check state under lock, we might not be leader anymore. if n.State() != Leader { - n.Unlock() - return + return false } + ps := n.peers[ar.peer] + // Update peer's last index. 
- if ps := n.peers[ar.peer]; ps != nil && ar.index > ps.li { + if ps != nil && ar.index > ps.li { ps.li = ar.index } @@ -3171,13 +3317,15 @@ func (n *raft) trackResponse(ar *appendEntryResponse) { // Ignore items already committed. if ar.index <= n.commit { - n.Unlock() - return + return false } - // See if we have items to apply. - var sendHB bool + // Not a peer, can't count this message towards quorum + if ps == nil { + return false + } + // Keep track of the response results := n.acks[ar.index] if results == nil { results = make(map[string]struct{}) @@ -3185,22 +3333,7 @@ func (n *raft) trackResponse(ar *appendEntryResponse) { } results[ar.peer] = struct{}{} - // We don't count ourselves to account for leader changes, so add 1. - if nr := len(results); nr+1 >= n.qn { - // We have a quorum. - for index := n.commit + 1; index <= ar.index; index++ { - if err := n.applyCommit(index); err != nil && err != errNodeClosed { - n.error("Got an error applying commit for %d: %v", index, err) - break - } - } - sendHB = n.prop.len() == 0 - } - n.Unlock() - - if sendHB { - n.sendHeartbeat() - } + return true } // Used to adjust cluster size and peer count based on added official peers. @@ -3249,8 +3382,6 @@ func (n *raft) trackPeer(peer string) error { } if ps := n.peers[peer]; ps != nil { ps.ts = time.Now() - } else if !isRemoved { - n.peers[peer] = &lps{time.Now(), 0, false} } n.Unlock() @@ -3367,6 +3498,8 @@ func (n *raft) cancelCatchup() { if n.catchup != nil && n.catchup.sub != nil { n.unsubscribe(n.catchup.sub) + // Send nil entry to signal the upper layers we are done catching up. + n.apply.push(nil) } n.catchup = nil } @@ -3394,6 +3527,9 @@ func (n *raft) createCatchup(ae *appendEntry) string { // Cleanup any old ones. if n.catchup != nil && n.catchup.sub != nil { n.unsubscribe(n.catchup.sub) + } else { + // Signal to the upper layer that the following entries are catchup entries, up until the nil guard. 
+ n.apply.push(newCommittedEntry(0, []*Entry{{EntryCatchup, nil}})) } // Snapshot term and index. n.catchup = &catchupState{ @@ -3477,6 +3613,7 @@ func (n *raft) resetWAL() { // Lock should be held func (n *raft) updateLeader(newLeader string) { + wasLeader := n.leader == n.id n.leader = newLeader n.hasleader.Store(newLeader != _EMPTY_) if !n.pleader.Load() && newLeader != noLeader { @@ -3493,9 +3630,9 @@ func (n *raft) updateLeader(newLeader string) { } } // Reset last seen timestamps. - // If we're the leader we track everyone, and don't reset. + // If we are (or were) the leader we track(ed) everyone, and don't reset. // But if we're a follower we only track the leader, and reset all others. - if newLeader != n.id { + if newLeader != n.id && !wasLeader { for peer, ps := range n.peers { if peer == newLeader { continue @@ -3669,8 +3806,6 @@ func (n *raft) processAppendEntry(ae *appendEntry, sub *subscription) { if isNew && ae.leader != noLeader && ae.leader == n.leader { if ps := n.peers[ae.leader]; ps != nil { ps.ts = time.Now() - } else { - n.peers[ae.leader] = &lps{time.Now(), 0, true} } } @@ -3929,7 +4064,29 @@ func (n *raft) processAppendEntryResponse(ar *appendEntryResponse) { // The remote node successfully committed the append entry. // They agree with our leadership and are happy with the state of the log. // In this case ar.term doesn't matter. - n.trackResponse(ar) + var err error + var committed bool + + n.Lock() + if n.trackResponse(ar) { + committed, err = n.tryCommit(ar.index) + } + n.Unlock() + + // Leader was peer-removed. Attempt a step-down to + // a new leader before shutting down. + if err == errNodeRemoved { + n.StepDown() + n.Stop() + } + + // Send a heartbeat if there is no other message lined + // up, so that followers can apply without waiting for + // the next message. 
+ if committed && n.prop.len() == 0 { + n.sendHeartbeat() + } + arPool.Put(ar) } else if ar.reply != _EMPTY_ { // The remote node didn't commit the append entry, and they believe they @@ -4022,12 +4179,15 @@ func (n *raft) sendAppendEntry(entries []*Entry) { defer n.Unlock() n.sendAppendEntryLocked(entries, true) } -func (n *raft) sendAppendEntryLocked(entries []*Entry, checkLeader bool) { + +// Returns nil if an appendEntry was appended to our WAL and sent to followers, +// an error otherwise. +func (n *raft) sendAppendEntryLocked(entries []*Entry, checkLeader bool) error { // Safeguard against sending an append entry right after a stepdown from a different goroutine. // Specifically done while holding the lock to not race. if checkLeader && n.State() != Leader { n.debug("Not sending append entry, not leader") - return + return errNotLeader } ae := n.buildAppendEntry(entries) @@ -4035,14 +4195,14 @@ func (n *raft) sendAppendEntryLocked(entries []*Entry, checkLeader bool) { var scratch [1024]byte ae.buf, err = ae.encode(scratch[:]) if err != nil { - return + return err } // If we have entries store this in our wal. shouldStore := ae.shouldStore() if shouldStore { if err := n.storeToWAL(ae); err != nil { - return + return err } n.active = time.Now() n.cachePendingEntry(ae) @@ -4051,6 +4211,7 @@ func (n *raft) sendAppendEntryLocked(entries []*Entry, checkLeader bool) { if !shouldStore { ae.returnToPool() } + return nil } // cachePendingEntry saves append entries in memory for faster processing during applyCommit. @@ -4582,6 +4743,8 @@ func (n *raft) switchToFollowerLocked(leader string) { n.leaderState.Store(false) n.leaderSince.Store(nil) n.lxfer = false + n.membChanging = false + // Reset acks, we can't assume acks from a previous term are still valid in another term. 
if len(n.acks) > 0 { n.acks = make(map[uint64]map[string]struct{}) @@ -4627,37 +4790,19 @@ func (n *raft) switchToLeader() { } n.Lock() + defer n.Unlock() n.debug("Switching to leader") - // Check if we have items pending as we are taking over. - sendHB := n.pindex > n.commit - n.lxfer = false n.updateLeader(n.id) - leadChange := n.switchState(Leader) + n.switchState(Leader) - if leadChange { - // Wait for messages to be applied if we've stored more, otherwise signal immediately. - // It's important to wait signaling we're leader if we're not up-to-date yet, as that - // would mean we're in a consistent state compared with the previous leader. - if n.pindex > n.applied { - n.aflr = n.pindex - } else { - // We know we have applied all entries in our log and can signal immediately. - // For sanity reset applied floor back down to 0, so we aren't able to signal twice. - n.aflr = 0 - if !n.leaderState.Swap(true) { - // Only update timestamp if leader state actually changed. - nowts := time.Now().UTC() - n.leaderSince.Store(&nowts) - } - n.updateLeadChange(true) - } - } - n.Unlock() - - if sendHB { - n.sendHeartbeat() - } + // To send out our initial peer state. + // In our implementation this is equivalent to sending a NOOP-entry upon becoming leader. + // Wait for this message (and potentially more) to be applied. + // It's important to wait signaling we're leader if we're not up-to-date yet, as that + // would mean we're in a consistent state compared with the previous leader. 
+ n.sendPeerState() + n.aflr = n.pindex } diff --git a/vendor/github.com/nats-io/nats-server/v2/server/reload.go b/vendor/github.com/nats-io/nats-server/v2/server/reload.go index 89594d1b9e3..7afe29b80f1 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/reload.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/reload.go @@ -1257,9 +1257,9 @@ func imposeOrder(value any) error { slices.SortFunc(value.Gateways, func(i, j *RemoteGatewayOpts) int { return cmp.Compare(i.Name, j.Name) }) case WebsocketOpts: slices.Sort(value.AllowedOrigins) - case string, bool, uint8, uint16, int, int32, int64, time.Duration, float64, nil, LeafNodeOpts, ClusterOpts, *tls.Config, PinnedCertSet, + case string, bool, uint8, uint16, uint64, int, int32, int64, time.Duration, float64, nil, LeafNodeOpts, ClusterOpts, *tls.Config, PinnedCertSet, *URLAccResolver, *MemAccResolver, *DirAccResolver, *CacheDirAccResolver, Authentication, MQTTOpts, jwt.TagList, - *OCSPConfig, map[string]string, JSLimitOpts, StoreCipher, *OCSPResponseCacheConfig, *ProxiesConfig: + *OCSPConfig, map[string]string, JSLimitOpts, StoreCipher, *OCSPResponseCacheConfig, *ProxiesConfig, WriteTimeoutPolicy: // explicitly skipped types case *AuthCallout: case JSTpmOpts: @@ -1659,6 +1659,8 @@ func (s *Server) diffOptions(newOpts *Options) ([]option, error) { return nil, fmt.Errorf("config reload not supported for jetstream max memory and store") } } + case "jetstreammetacompact", "jetstreammetacompactsize": + // Allowed at runtime but monitorCluster looks at s.opts directly, so no further work needed here. case "websocket": // Similar to gateways tmpOld := oldValue.(WebsocketOpts) @@ -2124,9 +2126,6 @@ func (s *Server) reloadAuthorization() { resetCh <- struct{}{} } - // Check that publish retained messages sources are still allowed to publish. 
- s.mqttCheckPubRetainedPerms() - // Close clients that have moved accounts for _, client := range cclients { client.closeConnection(ClientClosed) @@ -2166,6 +2165,10 @@ func (s *Server) reloadAuthorization() { s.Errorf(err.Error()) } } + + // Check that publish retained messages sources are still allowed to publish. + // Do this after dealing with JetStream. + s.mqttCheckPubRetainedPerms() } // Returns true if given client current account has changed (or user diff --git a/vendor/github.com/nats-io/nats-server/v2/server/route.go b/vendor/github.com/nats-io/nats-server/v2/server/route.go index 008e6ede51c..b5850ecd354 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/route.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/route.go @@ -1031,7 +1031,7 @@ func (s *Server) sendAsyncInfoToClients(regCli, wsCli bool) { c.flags.isSet(firstPongSent) { // sendInfo takes care of checking if the connection is still // valid or not, so don't duplicate tests here. - c.enqueueProto(c.generateClientInfoJSON(info)) + c.enqueueProto(c.generateClientInfoJSON(info, true)) } c.mu.Unlock() } @@ -2346,8 +2346,20 @@ func (s *Server) addRoute(c *client, didSolicit, sendDelayedInfo bool, gossipMod if doOnce { // check to be consistent and future proof. but will be same domain if s.sameDomain(info.Domain) { - s.nodeToInfo.Store(rHash, - nodeInfo{rn, s.info.Version, s.info.Cluster, info.Domain, id, nil, nil, nil, false, info.JetStream, false, false}) + s.nodeToInfo.Store(rHash, nodeInfo{ + name: rn, + version: s.info.Version, + cluster: s.info.Cluster, + domain: info.Domain, + id: id, + tags: nil, + cfg: nil, + stats: nil, + offline: false, + js: info.JetStream, + binarySnapshots: true, // Updated default to true. Versions 2.10.0+ support it. 
+ accountNRG: false, + }) } } diff --git a/vendor/github.com/nats-io/nats-server/v2/server/server.go b/vendor/github.com/nats-io/nats-server/v2/server/server.go index fb3e472b87e..d8c8a1cb2c8 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/server.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/server.go @@ -44,6 +44,8 @@ import ( // Allow dynamic profiling. _ "net/http/pprof" + "expvar" + "github.com/klauspost/compress/s2" "github.com/nats-io/jwt/v2" "github.com/nats-io/nats-server/v2/logger" @@ -841,15 +843,18 @@ func NewServer(opts *Options) (*Server, error) { if opts.JetStream { ourNode := getHash(serverName) s.nodeToInfo.Store(ourNode, nodeInfo{ - serverName, - VERSION, - opts.Cluster.Name, - opts.JetStreamDomain, - info.ID, - opts.Tags, - &JetStreamConfig{MaxMemory: opts.JetStreamMaxMemory, MaxStore: opts.JetStreamMaxStore, CompressOK: true}, - nil, - false, true, true, true, + name: serverName, + version: VERSION, + cluster: opts.Cluster.Name, + domain: opts.JetStreamDomain, + id: info.ID, + tags: opts.Tags, + cfg: &JetStreamConfig{MaxMemory: opts.JetStreamMaxMemory, MaxStore: opts.JetStreamMaxStore, CompressOK: true}, + stats: nil, + offline: false, + js: true, + binarySnapshots: true, + accountNRG: true, }) } @@ -1076,8 +1081,8 @@ func (s *Server) serverName() string { return s.getOpts().ServerName } -// ClientURL returns the URL used to connect clients. Helpful in testing -// when we designate a random client port (-1). +// ClientURL returns the URL used to connect clients. +// Helpful in tests and with in-process servers using a random client port (-1). func (s *Server) ClientURL() string { // FIXME(dlc) - should we add in user and pass if defined single? opts := s.getOpts() @@ -1090,6 +1095,19 @@ func (s *Server) ClientURL() string { return u.String() } +// WebsocketURL returns the URL used to connect websocket clients. +// Helpful in tests and with in-process servers using a random websocket port (-1). 
+func (s *Server) WebsocketURL() string { + opts := s.getOpts() + var u url.URL + u.Scheme = "ws" + if opts.Websocket.TLSConfig != nil { + u.Scheme = "wss" + } + u.Host = net.JoinHostPort(opts.Websocket.Host, fmt.Sprintf("%d", opts.Websocket.Port)) + return u.String() +} + func validateCluster(o *Options) error { if o.Cluster.Name != _EMPTY_ && strings.Contains(o.Cluster.Name, " ") { return ErrClusterNameHasSpaces @@ -2049,6 +2067,13 @@ func (s *Server) setRouteInfo(acc *Account) { // associated with an account name. // Lock MUST NOT be held upon entry. func (s *Server) lookupAccount(name string) (*Account, error) { + return s.lookupOrFetchAccount(name, true) +} + +// lookupOrFetchAccount is a function to return the account structure +// associated with an account name. +// Lock MUST NOT be held upon entry. +func (s *Server) lookupOrFetchAccount(name string, fetch bool) (*Account, error) { var acc *Account if v, ok := s.accounts.Load(name); ok { acc = v.(*Account) @@ -2058,7 +2083,7 @@ func (s *Server) lookupAccount(name string) (*Account, error) { // return the latest information from the resolver. if acc.IsExpired() { s.Debugf("Requested account [%s] has expired", name) - if s.AccountResolver() != nil { + if s.AccountResolver() != nil && fetch { if err := s.updateAccount(acc); err != nil { // This error could mask expired, so just return expired here. return nil, ErrAccountExpired @@ -2070,7 +2095,7 @@ func (s *Server) lookupAccount(name string) (*Account, error) { return acc, nil } // If we have a resolver see if it can fetch the account. 
- if s.AccountResolver() == nil { + if s.AccountResolver() == nil || !fetch { return nil, ErrMissingAccount } return s.fetchAccount(name) @@ -2781,6 +2806,11 @@ func (s *Server) AcceptLoop(clr chan struct{}) { s.Noticef("Listening for client connections on %s", net.JoinHostPort(opts.Host, strconv.Itoa(l.Addr().(*net.TCPAddr).Port))) + // Alert if PROXY protocol is enabled + if opts.ProxyProtocol { + s.Noticef("PROXY protocol enabled for client connections") + } + // Alert of TLS enabled. if opts.TLSConfig != nil { s.Noticef("TLS required for client connections") @@ -3017,6 +3047,7 @@ const ( HealthzPath = "/healthz" IPQueuesPath = "/ipqueuesz" RaftzPath = "/raftz" + ExpvarzPath = "/debug/vars" ) func (s *Server) basePath(p string) string { @@ -3135,6 +3166,8 @@ func (s *Server) startMonitoring(secure bool) error { mux.HandleFunc(s.basePath(IPQueuesPath), s.HandleIPQueuesz) // Raftz mux.HandleFunc(s.basePath(RaftzPath), s.HandleRaftz) + // Expvarz + mux.Handle(s.basePath(ExpvarzPath), expvar.Handler()) // Do not set a WriteTimeout because it could cause cURL/browser // to return empty response or unable to display page if the @@ -3307,8 +3340,11 @@ func (s *Server) createClientEx(conn net.Conn, inProcess bool) *client { } // Decide if we are going to require TLS or not and generate INFO json. + // If we have ProxyProtocol enabled then we won't include the client + // IP in the initial INFO, as that would leak the proxy IP itself. + // In that case we'll send another INFO after the client introduces itself. tlsRequired := info.TLSRequired - infoBytes := c.generateClientInfoJSON(info) + infoBytes := c.generateClientInfoJSON(info, !opts.ProxyProtocol) // Send our information, except if TLS and TLSHandshakeFirst is requested. if !tlsFirst { @@ -3379,7 +3415,7 @@ func (s *Server) createClientEx(conn net.Conn, inProcess bool) *client { // different that the current value and regenerate infoBytes. 
if orgInfoTLSReq != info.TLSRequired { info.TLSRequired = orgInfoTLSReq - infoBytes = c.generateClientInfoJSON(info) + infoBytes = c.generateClientInfoJSON(info, !opts.ProxyProtocol) } c.sendProtoNow(infoBytes) // Set the boolean to false for the rest of the function. @@ -3392,7 +3428,7 @@ func (s *Server) createClientEx(conn net.Conn, inProcess bool) *client { // one the client wants. We'll always allow this for in-process // connections. if !isClosed && !tlsFirst && opts.TLSConfig != nil && (inProcess || opts.AllowNonTLS) { - pre = make([]byte, 4) + pre = make([]byte, 6) // Minimum 6 bytes for proxy proto in next step. c.nc.SetReadDeadline(time.Now().Add(secondsToDuration(opts.TLSTimeout))) n, _ := io.ReadFull(c.nc, pre[:]) c.nc.SetReadDeadline(time.Time{}) @@ -3404,6 +3440,55 @@ func (s *Server) createClientEx(conn net.Conn, inProcess bool) *client { } } + // Check for proxy protocol if enabled. + if !isClosed && !tlsRequired && opts.ProxyProtocol { + if len(pre) == 0 { + // There has been no pre-read yet, do so so we can work out + // if the client is trying to negotiate PROXY. + pre = make([]byte, 6) + c.nc.SetReadDeadline(time.Now().Add(proxyProtoReadTimeout)) + n, _ := io.ReadFull(c.nc, pre) + c.nc.SetReadDeadline(time.Time{}) + pre = pre[:n] + } + conn = &tlsMixConn{conn, bytes.NewBuffer(pre)} + addr, err := readProxyProtoHeader(conn) + if err != nil && err != errProxyProtoUnrecognized { + // err != errProxyProtoUnrecognized implies that we detected a proxy + // protocol header but we failed to parse it, so don't continue. + c.mu.Unlock() + s.Warnf("Error reading PROXY protocol header from %s: %v", conn.RemoteAddr(), err) + c.closeConnection(ProtocolViolation) + return nil + } + // If addr is nil, it was a LOCAL/UNKNOWN command (health check) + // Use the connection as-is + if addr != nil { + c.nc = &proxyConn{ + Conn: conn, + remoteAddr: addr, + } + // These were set already by initClient, override them. 
+ c.host = addr.srcIP.String() + c.port = addr.srcPort + } + // At this point, err is either: + // - nil => we parsed the proxy protocol header successfully + // - errProxyProtoUnrecognized => we didn't detect proxy protocol at all + // We only clear the pre-read if we successfully read the protocol header + // so that the next step doesn't re-read it. Otherwise we have to assume + // that it's a non-proxied connection and we want the pre-read to remain + // for the next step. + if err == nil { + pre = nil + } + // Because we have ProxyProtocol enabled, our earlier INFO message didn't + // include the client_ip. If we need to send it again then we will include + // it, but sending it here immediately can confuse clients who have just + // PING'd. + infoBytes = c.generateClientInfoJSON(info, true) + } + // Check for TLS if !isClosed && tlsRequired { if s.connRateCounter != nil && !s.connRateCounter.allow() { @@ -4688,7 +4773,7 @@ func (s *Server) LDMClientByID(id uint64) error { // sendInfo takes care of checking if the connection is still // valid or not, so don't duplicate tests here. 
c.Debugf("Sending Lame Duck Mode info to client") - c.enqueueProto(c.generateClientInfoJSON(info)) + c.enqueueProto(c.generateClientInfoJSON(info, true)) return nil } else { return errors.New("client does not support Lame Duck Mode or is not ready to receive the notification") diff --git a/vendor/github.com/nats-io/nats-server/v2/server/store.go b/vendor/github.com/nats-io/nats-server/v2/server/store.go index 9819929e9a4..15700d0736f 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/store.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/store.go @@ -99,6 +99,7 @@ type StreamStore interface { LoadNextMsgMulti(sl *gsl.SimpleSublist, start uint64, smp *StoreMsg) (sm *StoreMsg, skip uint64, err error) LoadLastMsg(subject string, sm *StoreMsg) (*StoreMsg, error) LoadPrevMsg(start uint64, smp *StoreMsg) (sm *StoreMsg, err error) + LoadPrevMsgMulti(sl *gsl.SimpleSublist, start uint64, smp *StoreMsg) (sm *StoreMsg, skip uint64, err error) RemoveMsg(seq uint64) (bool, error) EraseMsg(seq uint64) (bool, error) Purge() (uint64, error) @@ -112,8 +113,8 @@ type StreamStore interface { AllLastSeqs() ([]uint64, error) MultiLastSeqs(filters []string, maxSeq uint64, maxAllowed int) ([]uint64, error) SubjectForSeq(seq uint64) (string, error) - NumPending(sseq uint64, filter string, lastPerSubject bool) (total, validThrough uint64) - NumPendingMulti(sseq uint64, sl *gsl.SimpleSublist, lastPerSubject bool) (total, validThrough uint64) + NumPending(sseq uint64, filter string, lastPerSubject bool) (total, validThrough uint64, err error) + NumPendingMulti(sseq uint64, sl *gsl.SimpleSublist, lastPerSubject bool) (total, validThrough uint64, err error) State() StreamState FastState(*StreamState) EncodedStreamState(failed uint64) (enc []byte, err error) @@ -125,7 +126,7 @@ type StreamStore interface { UpdateConfig(cfg *StreamConfig) error Delete(inline bool) error Stop() error - ConsumerStore(name string, cfg *ConsumerConfig) (ConsumerStore, error) + ConsumerStore(name 
string, created time.Time, cfg *ConsumerConfig) (ConsumerStore, error) AddConsumer(o ConsumerStore) error RemoveConsumer(o ConsumerStore) error Snapshot(deadline time.Duration, includeConsumers, checkMsgs bool) (*SnapshotResult, error) diff --git a/vendor/github.com/nats-io/nats-server/v2/server/stream.go b/vendor/github.com/nats-io/nats-server/v2/server/stream.go index 1329c1642a3..906f111491b 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/stream.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/stream.go @@ -827,11 +827,19 @@ func (a *Account) addStreamWithAssignment(config *StreamConfig, fsConfig *FileSt ipqLimitByLen[*inMsg](mlen), ipqLimitBySize[*inMsg](msz), ), - gets: newIPQueue[*directGetReq](s, qpfx+"direct gets"), - qch: make(chan struct{}), - mqch: make(chan struct{}), - uch: make(chan struct{}, 4), - sch: make(chan struct{}, 1), + gets: newIPQueue[*directGetReq](s, qpfx+"direct gets"), + qch: make(chan struct{}), + mqch: make(chan struct{}), + uch: make(chan struct{}, 4), + sch: make(chan struct{}, 1), + created: time.Now().UTC(), + } + + // Add created timestamp used for the store, must match that of the stream assignment if it exists. + if sa != nil { + js.mu.RLock() + mset.created = sa.Created + js.mu.RUnlock() } // Start our signaling routine to process consumers. 
@@ -895,7 +903,6 @@ func (a *Account) addStreamWithAssignment(config *StreamConfig, fsConfig *FileSt fsCfg.SyncAlways = false fsCfg.AsyncFlush = true } - if err := mset.setupStore(fsCfg); err != nil { mset.stop(true, false) return nil, NewJSStreamStoreFailedError(err) @@ -2012,6 +2019,18 @@ func (s *Server) checkStreamCfg(config *StreamConfig, acc *Account, pedantic boo } } + // Check the subject transform if any + if cfg.SubjectTransform != nil { + if cfg.SubjectTransform.Source != _EMPTY_ && !IsValidSubject(cfg.SubjectTransform.Source) { + return StreamConfig{}, NewJSStreamTransformInvalidSourceError(fmt.Errorf("%w %s", ErrBadSubject, cfg.SubjectTransform.Source)) + } + + err := ValidateMapping(cfg.SubjectTransform.Source, cfg.SubjectTransform.Destination) + if err != nil { + return StreamConfig{}, NewJSStreamTransformInvalidDestinationError(err) + } + } + // If we have a republish directive check if we can create a transform here. if cfg.RePublish != nil { // Check to make sure source is a valid subset of the subjects we have. @@ -2023,6 +2042,18 @@ func (s *Server) checkStreamCfg(config *StreamConfig, acc *Account, pedantic boo } cfg.RePublish.Source = fwcs } + // A RePublish from '>' to '>' could be used, normally this would form a cycle with the stream subjects. + // But if this aligns to a different subject based on the transform, we allow it still. + // The RePublish will be implicit based on the transform, but only if the transform's source + // is the only stream subject. + if cfg.RePublish.Destination == fwcs && cfg.RePublish.Source == fwcs && cfg.SubjectTransform != nil && + len(cfg.Subjects) == 1 && cfg.SubjectTransform.Source == cfg.Subjects[0] { + if pedantic { + return StreamConfig{}, NewJSPedanticError(fmt.Errorf("implicit republish based on subject transform")) + } + // RePublish all messages with the transformed subject. 
+ cfg.RePublish.Source, cfg.RePublish.Destination = cfg.SubjectTransform.Destination, cfg.SubjectTransform.Destination + } var formsCycle bool for _, subj := range cfg.Subjects { if SubjectsCollide(cfg.RePublish.Destination, subj) { @@ -2038,18 +2069,6 @@ func (s *Server) checkStreamCfg(config *StreamConfig, acc *Account, pedantic boo } } - // Check the subject transform if any - if cfg.SubjectTransform != nil { - if cfg.SubjectTransform.Source != _EMPTY_ && !IsValidSubject(cfg.SubjectTransform.Source) { - return StreamConfig{}, NewJSStreamTransformInvalidSourceError(fmt.Errorf("%w %s", ErrBadSubject, cfg.SubjectTransform.Source)) - } - - err := ValidateMapping(cfg.SubjectTransform.Source, cfg.SubjectTransform.Destination) - if err != nil { - return StreamConfig{}, NewJSStreamTransformInvalidDestinationError(err) - } - } - // Remove placement if it's an empty object. if cfg.Placement != nil && reflect.DeepEqual(cfg.Placement, &Placement{}) { cfg.Placement = nil @@ -4074,28 +4093,61 @@ func (mset *stream) setStartingSequenceForSources(iNames map[string]struct{}) { return } + // From the provided list of sources, we build a sublist that contains + // the interested filters (including transforms). As we figure out the + // starting sequence for each source, we will eliminate the source from + // the map and then refresh the sublist, which in turn makes the sublist + // ideally more specific. This allows LoadPrevMsgsMulti to work most + // effectively. + // Because this is a SimpleSublist we can't just remove the entries per + // source so we have no other option but to rebuild it from scratch, but + // this is cheap enough to do so not the end of the world. 
+ var sl *gsl.SimpleSublist + refreshSublist := func() { + sl = gsl.NewSimpleSublist() + for iName := range iNames { + si := mset.sources[iName] + if si == nil { + continue + } + if si.sf == _EMPTY_ { + sl.Insert(fwcs, struct{}{}) + } else { + sl.Insert(si.sf, struct{}{}) + } + for _, sf := range si.sfs { + if sf == _EMPTY_ { + sl.Insert(fwcs, struct{}{}) + } else { + sl.Insert(sf, struct{}{}) + } + } + } + } + refreshSublist() + var smv StoreMsg - for seq := state.LastSeq; seq >= state.FirstSeq; { - sm, err := mset.store.LoadPrevMsg(seq, &smv) + for last := state.LastSeq; ; { + sm, seq, err := mset.store.LoadPrevMsgMulti(sl, last, &smv) if err == ErrStoreEOF || err != nil { break } - seq = sm.seq - 1 + last = seq - 1 if len(sm.hdr) == 0 { continue } - - ss := getHeader(JSStreamSource, sm.hdr) + ss := sliceHeader(JSStreamSource, sm.hdr) if len(ss) == 0 { continue } - streamName, indexName, sseq := streamAndSeq(bytesToString(ss)) + streamName, indexName, sseq := streamAndSeq(bytesToString(ss)) if _, ok := iNames[indexName]; ok { si := mset.sources[indexName] si.sseq = sseq si.dseq = 0 delete(iNames, indexName) + refreshSublist() } else if indexName == _EMPTY_ && streamName != _EMPTY_ { for iName := range iNames { // TODO streamSource is a linear walk, to optimize later @@ -4104,6 +4156,7 @@ func (mset *stream) setStartingSequenceForSources(iNames map[string]struct{}) { si.sseq = sseq si.dseq = 0 delete(iNames, iName) + refreshSublist() break } } @@ -4185,26 +4238,61 @@ func (mset *stream) startingSequenceForSources() { } }() + // Generate a list of sources and, from that, a sublist that contains + // the interested filters (including transforms). As we figure out the + // starting sequence for each source, we will eliminate the source from + // the map and then refresh the sublist, which in turn makes the sublist + // ideally more specific. This allows LoadPrevMsgsMulti to work most + // effectively. 
+ // Because this is a SimpleSublist we can't just remove the entries per + // source so we have no other option but to rebuild it from scratch, but + // this is cheap enough to do so not the end of the world. + sources := map[string]*StreamSource{} + for _, src := range mset.cfg.Sources { + sources[src.composeIName()] = src + } + var sl *gsl.SimpleSublist + refreshSublist := func() { + sl = gsl.NewSimpleSublist() + for _, src := range sources { + if src.FilterSubject == _EMPTY_ { + sl.Insert(fwcs, struct{}{}) + } else { + sl.Insert(src.FilterSubject, struct{}{}) + } + for _, tr := range src.SubjectTransforms { + if tr.Destination == _EMPTY_ { + sl.Insert(fwcs, struct{}{}) + } else { + sl.Insert(tr.Destination, struct{}{}) + } + } + } + } + refreshSublist() + update := func(iName string, seq uint64) { // Only update active in case we have older ones in here that got configured out. if si := mset.sources[iName]; si != nil { if _, ok := seqs[iName]; !ok { seqs[iName] = seq + delete(sources, iName) + refreshSublist() } } } var smv StoreMsg - for seq := state.LastSeq; ; { - sm, err := mset.store.LoadPrevMsg(seq, &smv) + for last := state.LastSeq; ; { + sm, seq, err := mset.store.LoadPrevMsgMulti(sl, last, &smv) if err == ErrStoreEOF || err != nil { break } - seq = sm.seq - 1 + last = seq - 1 if len(sm.hdr) == 0 { continue } - ss := getHeader(JSStreamSource, sm.hdr) + ss := sliceHeader(JSStreamSource, sm.hdr) if len(ss) == 0 { continue } @@ -4534,8 +4622,6 @@ func (mset *stream) unsubscribe(sub *subscription) { func (mset *stream) setupStore(fsCfg *FileStoreConfig) error { mset.mu.Lock() - mset.created = time.Now().UTC() - switch mset.cfg.Storage { case MemoryStorage: ms, err := newMemStore(&mset.cfg) @@ -5196,7 +5282,10 @@ func (mset *stream) getDirectRequest(req *JSApiMsgGetRequest, reply string) { } else { // This is a batch request, capture initial numPending. 
isBatchRequest = true - np, validThrough = store.NumPending(seq, req.NextFor, false) + var err error + if np, validThrough, err = store.NumPending(seq, req.NextFor, false); err != nil { + return + } } // Grab MaxBytes @@ -5287,9 +5376,12 @@ func (mset *stream) getDirectRequest(req *JSApiMsgGetRequest, reply string) { // If batch was requested send EOB. if isBatchRequest { - // Update if the stream's lasts sequence has moved past our validThrough. - if mset.lastSeq() > validThrough { - np, _ = store.NumPending(seq, req.NextFor, false) + // Update if the stream's last sequence has moved past our validThrough. + if mset.lseq > validThrough { + var err error + if np, _, err = store.NumPending(seq, req.NextFor, false); err != nil { + return + } } hdr := fmt.Appendf(nil, eob, np, lseq) mset.outq.send(newJSPubMsg(reply, _EMPTY_, _EMPTY_, hdr, nil, nil, 0)) @@ -6507,7 +6599,7 @@ func (mset *stream) processJetStreamBatchMsg(batchId, subject, reply string, hdr } // Reject unsupported headers. - if getExpectedLastMsgId(hdr) != _EMPTY_ { + if getExpectedLastMsgId(bhdr) != _EMPTY_ { return errorOnUnsupported(seq, JSExpectedLastMsgId) } @@ -7172,6 +7264,20 @@ func (mset *stream) getPublicConsumers() []*consumer { return obs } +// This returns all consumers that are DIRECT. +func (mset *stream) getDirectConsumers() []*consumer { + mset.clsMu.RLock() + defer mset.clsMu.RUnlock() + + var obs []*consumer + for _, o := range mset.cList { + if o.cfg.Direct { + obs = append(obs, o) + } + } + return obs +} + // 2 minutes plus up to 30s jitter. const ( defaultCheckInterestStateT = 2 * time.Minute @@ -7593,7 +7699,19 @@ func (mset *stream) ackMsg(o *consumer, seq uint64) bool { // Only propose message deletion to the stream if we're consumer leader, otherwise all followers would also propose. // We must be the consumer leader, since we know for sure we've stored the message and don't register as pre-ack. 
if o != nil && !o.IsLeader() { + // Currently, interest-based streams can race on "no interest" because consumer creates/updates go over + // the meta layer and published messages go over the stream layer. Some servers could then either store + // or not store some initial set of messages that gained new interest. To get the stream back in sync, + // we allow moving the first sequence up. + // TODO(mvv): later on only the stream leader should determine "no interest" + interestRaiseFirst := mset.cfg.Retention == InterestPolicy && seq == state.FirstSeq mset.mu.Unlock() + if interestRaiseFirst { + if _, err := store.RemoveMsg(seq); err == ErrStoreEOF { + // This should not happen, but being pedantic. + mset.registerPreAckLock(o, seq) + } + } // Must still mark as removal if follower. If we become leader later, we must be able to retry the proposal. return true } diff --git a/vendor/github.com/nats-io/nats-server/v2/server/stree/parts.go b/vendor/github.com/nats-io/nats-server/v2/server/stree/parts.go index 9ac059677e7..af5dd9c1762 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/stree/parts.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/stree/parts.go @@ -36,7 +36,6 @@ func genParts(filter []byte, parts [][]byte) [][]byte { } start = i + 1 } else if i < e && filter[i+1] == fwc && i+1 == e { - // We have a fwc if i > start { parts = append(parts, filter[start:i+1]) } @@ -53,6 +52,10 @@ func genParts(filter []byte, parts [][]byte) [][]byte { if next := i + 1; next == e || next < e && filter[next] != tsep { continue } + // Full wildcard must be terminal. + if filter[i] == fwc && i < e { + break + } // We start with a pwc or fwc. 
parts = append(parts, filter[i:i+1]) if i+1 <= e { diff --git a/vendor/github.com/nats-io/nats-server/v2/server/subject_transform.go b/vendor/github.com/nats-io/nats-server/v2/server/subject_transform.go index e131b176eff..6379a0ad320 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/subject_transform.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/subject_transform.go @@ -19,6 +19,7 @@ import ( "math" "math/rand" "regexp" + "slices" "strconv" "strings" ) @@ -378,7 +379,7 @@ func transformTokenize(subject string) string { // We need to make the appropriate markers for the wildcards etc. i := 1 var nda []string - for _, token := range strings.Split(subject, tsep) { + for token := range strings.SplitSeq(subject, tsep) { if token == pwcs { nda = append(nda, fmt.Sprintf("$%d", i)) i++ @@ -399,7 +400,7 @@ func transformUntokenize(subject string) (string, []string) { var phs []string var nda []string - for _, token := range strings.Split(subject, tsep) { + for token := range strings.SplitSeq(subject, tsep) { if args := getMappingFunctionArgs(wildcardMappingFunctionRegEx, token); (len(token) > 1 && token[0] == '$' && token[1] >= '1' && token[1] <= '9') || (len(args) == 1 && args[0] != _EMPTY_) { phs = append(phs, token) nda = append(nda, pwcs) @@ -439,7 +440,7 @@ func (tr *subjectTransform) Match(subject string) (string, error) { tts := tokenizeSubject(subject) // TODO(jnm): optimization -> not sure this is actually needed but was there in initial code - if !isValidLiteralSubject(tts) { + if !isValidLiteralSubject(slices.Values(tts)) { return _EMPTY_, ErrBadSubject } diff --git a/vendor/github.com/nats-io/nats-server/v2/server/sublist.go b/vendor/github.com/nats-io/nats-server/v2/server/sublist.go index f759cb084fb..6d4d145f3e4 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/sublist.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/sublist.go @@ -16,6 +16,7 @@ package server import ( "bytes" "errors" + "iter" "strings" "sync" 
"sync/atomic" @@ -357,16 +358,6 @@ func (s *Sublist) chkForRemoveNotification(subject, queue string) { func (s *Sublist) Insert(sub *subscription) error { // copy the subject since we hold this and this might be part of a large byte slice. subject := string(sub.subject) - tsa := [32]string{} - tokens := tsa[:0] - start := 0 - for i := 0; i < len(subject); i++ { - if subject[i] == btsep { - tokens = append(tokens, subject[start:i]) - start = i + 1 - } - } - tokens = append(tokens, subject[start:]) s.Lock() @@ -374,7 +365,7 @@ func (s *Sublist) Insert(sub *subscription) error { var n *node l := s.root - for _, t := range tokens { + for t := range strings.SplitSeq(subject, tsep) { lt := len(t) if lt == 0 || sfwc { s.Unlock() @@ -851,16 +842,6 @@ type lnt struct { // Raw low level remove, can do batches with lock held outside. func (s *Sublist) remove(sub *subscription, shouldLock bool, doCacheUpdates bool) error { subject := string(sub.subject) - tsa := [32]string{} - tokens := tsa[:0] - start := 0 - for i := 0; i < len(subject); i++ { - if subject[i] == btsep { - tokens = append(tokens, subject[start:i]) - start = i + 1 - } - } - tokens = append(tokens, subject[start:]) if shouldLock { s.Lock() @@ -875,7 +856,7 @@ func (s *Sublist) remove(sub *subscription, shouldLock bool, doCacheUpdates bool var lnts [32]lnt levels := lnts[:0] - for _, t := range tokens { + for t := range strings.SplitSeq(subject, tsep) { lt := len(t) if lt == 0 || sfwc { return ErrInvalidSubject @@ -1230,8 +1211,7 @@ func isValidSubject(subject string, checkRunes bool) bool { } } sfwc := false - tokens := strings.Split(subject, tsep) - for _, t := range tokens { + for t := range strings.SplitSeq(subject, tsep) { length := len(t) if length == 0 || sfwc { return false @@ -1254,12 +1234,12 @@ func isValidSubject(subject string, checkRunes bool) bool { // IsValidLiteralSubject returns true if a subject is valid and literal (no wildcards), false otherwise func IsValidLiteralSubject(subject string) bool 
{ - return isValidLiteralSubject(strings.Split(subject, tsep)) + return isValidLiteralSubject(strings.SplitSeq(subject, tsep)) } // isValidLiteralSubject returns true if the tokens are valid and literal (no wildcards), false otherwise -func isValidLiteralSubject(tokens []string) bool { - for _, t := range tokens { +func isValidLiteralSubject(tokens iter.Seq[string]) bool { + for t := range tokens { if len(t) == 0 { return false } @@ -1279,9 +1259,8 @@ func ValidateMapping(src string, dest string) error { if dest == _EMPTY_ { return nil } - subjectTokens := strings.Split(dest, tsep) sfwc := false - for _, t := range subjectTokens { + for t := range strings.SplitSeq(dest, tsep) { length := len(t) if length == 0 || sfwc { return &mappingDestinationErr{t, ErrInvalidMappingDestinationSubject} diff --git a/vendor/github.com/nats-io/nats-server/v2/server/util.go b/vendor/github.com/nats-io/nats-server/v2/server/util.go index dcfa1000d43..4e08e3f0cda 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/util.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/util.go @@ -23,6 +23,7 @@ import ( "net" "net/url" "reflect" + "runtime" "strconv" "strings" "time" @@ -340,3 +341,25 @@ func generateInfoJSON(info *Info) []byte { pcs := [][]byte{[]byte("INFO"), b, []byte(CR_LF)} return bytes.Join(pcs, []byte(" ")) } + +// parallelTaskQueue starts a number of goroutines and returns a channel +// which functions can be sent to for queued parallel execution. The +// goroutines will stop running when the returned channel is closed and +// all queued tasks have completed. The passed in mp limits concurrency, +// or a value <= 0 will default to GOMAXPROCS. 
+func parallelTaskQueue(mp int) chan<- func() { + if rmp := runtime.GOMAXPROCS(-1); mp <= 0 { + mp = rmp + } else { + mp = max(rmp, mp) + } + tq := make(chan func(), mp) + for range mp { + go func() { + for fn := range tq { + fn() + } + }() + } + return tq +} diff --git a/vendor/github.com/nats-io/nats-server/v2/server/websocket.go b/vendor/github.com/nats-io/nats-server/v2/server/websocket.go index cc83b465bba..7f352996426 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/websocket.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/websocket.go @@ -31,6 +31,7 @@ import ( "strconv" "strings" "sync" + "sync/atomic" "time" "unicode/utf8" @@ -211,6 +212,7 @@ func (c *client) wsRead(r *wsReadInfo, ior io.Reader, buf []byte) ([][]byte, err err error pos int max = len(buf) + mpay = int(atomic.LoadInt32(&c.mpay)) ) for pos != max { if r.fs { @@ -324,7 +326,7 @@ func (c *client) wsRead(r *wsReadInfo, ior io.Reader, buf []byte) ([][]byte, err // When we have the final frame and we have read the full payload, // we can decompress it. if r.ff && r.rem == 0 { - b, err = r.decompress() + b, err = r.decompress(mpay) if err != nil { return bufs, err } @@ -398,7 +400,16 @@ func (r *wsReadInfo) ReadByte() (byte, error) { return b, nil } -func (r *wsReadInfo) decompress() ([]byte, error) { +// decompress decompresses the collected buffers. +// The size of the decompressed buffer will be limited to the `mpay` value. +// If, while decompressing, the resulting uncompressed buffer exceeds this +// limit, the decompression stops and an empty buffer and the ErrMaxPayload +// error are returned. +func (r *wsReadInfo) decompress(mpay int) ([]byte, error) { + // If not limit is specified, use the default maximum payload size. 
+ if mpay <= 0 { + mpay = MAX_PAYLOAD_SIZE + } r.coff = 0 // As per https://tools.ietf.org/html/rfc7692#section-7.2.2 // add 0x00, 0x00, 0xff, 0xff and then a final block so that flate reader @@ -413,8 +424,15 @@ func (r *wsReadInfo) decompress() ([]byte, error) { } else { d.(flate.Resetter).Reset(r, nil) } - // This will do the decompression. - b, err := io.ReadAll(d) + // Use a LimitedReader to limit the decompressed size. + // We use "limit+1" bytes for "N" so we can detect if the limit is exceeded. + lr := io.LimitedReader{R: d, N: int64(mpay + 1)} + b, err := io.ReadAll(&lr) + if err == nil && len(b) > mpay { + // Decompressed data exceeds the maximum payload size. + b, err = nil, ErrMaxPayload + } + lr.R = nil decompressorPool.Put(d) // Now reset the compressed buffers list. r.cbufs = nil @@ -1294,7 +1312,7 @@ func (s *Server) createWSClient(conn net.Conn, ws *websocket) *client { } c.initClient() c.Debugf("Client connection created") - c.sendProtoNow(c.generateClientInfoJSON(info)) + c.sendProtoNow(c.generateClientInfoJSON(info, true)) c.mu.Unlock() s.mu.Lock() diff --git a/vendor/github.com/nats-io/nats.go/README.md b/vendor/github.com/nats-io/nats.go/README.md index 30783815e50..9340a8899ec 100644 --- a/vendor/github.com/nats-io/nats.go/README.md +++ b/vendor/github.com/nats-io/nats.go/README.md @@ -23,7 +23,7 @@ A [Go](http://golang.org) client for the [NATS messaging system](https://nats.io go get github.com/nats-io/nats.go@latest # To get a specific version: -go get github.com/nats-io/nats.go@v1.46.1 +go get github.com/nats-io/nats.go@v1.48.0 # Note that the latest major version for NATS Server is v2: go get github.com/nats-io/nats-server/v2@latest diff --git a/vendor/github.com/nats-io/nats.go/context.go b/vendor/github.com/nats-io/nats.go/context.go index 382335e836c..b3dfa4f06d7 100644 --- a/vendor/github.com/nats-io/nats.go/context.go +++ b/vendor/github.com/nats-io/nats.go/context.go @@ -95,7 +95,7 @@ func (nc *Conn) oldRequestWithContext(ctx 
context.Context, subj string, hdr, dat s.AutoUnsubscribe(1) defer s.Unsubscribe() - err = nc.publish(subj, inbox, hdr, data) + err = nc.publish(subj, inbox, false, hdr, data) if err != nil { return nil, err } diff --git a/vendor/github.com/nats-io/nats.go/enc.go b/vendor/github.com/nats-io/nats.go/enc.go index 34a3fae7fe6..f2297b49a7b 100644 --- a/vendor/github.com/nats-io/nats.go/enc.go +++ b/vendor/github.com/nats-io/nats.go/enc.go @@ -107,7 +107,7 @@ func (c *EncodedConn) Publish(subject string, v any) error { if err != nil { return err } - return c.Conn.publish(subject, _EMPTY_, nil, b) + return c.Conn.publish(subject, _EMPTY_, false, nil, b) } // PublishRequest will perform a Publish() expecting a response on the @@ -120,7 +120,7 @@ func (c *EncodedConn) PublishRequest(subject, reply string, v any) error { if err != nil { return err } - return c.Conn.publish(subject, reply, nil, b) + return c.Conn.publish(subject, reply, true, nil, b) } // Request will create an Inbox and perform a Request() call diff --git a/vendor/github.com/nats-io/nats.go/js.go b/vendor/github.com/nats-io/nats.go/js.go index 90f8df61576..def5941184e 100644 --- a/vendor/github.com/nats-io/nats.go/js.go +++ b/vendor/github.com/nats-io/nats.go/js.go @@ -1132,7 +1132,7 @@ func (js *js) PublishMsgAsync(m *Msg, opts ...PubOpt) (PubAckFuture, error) { if err != nil { return nil, err } - if err := js.nc.publish(m.Subject, reply, hdr, m.Data); err != nil { + if err := js.nc.publish(m.Subject, reply, false, hdr, m.Data); err != nil { js.clearPAF(id) return nil, err } @@ -3560,7 +3560,7 @@ func (js *js) apiRequestWithContext(ctx context.Context, subj string, data []byt } if js.opts.shouldTrace { ctrace := js.opts.ctrace - if ctrace.RequestSent != nil { + if ctrace.ResponseReceived != nil { ctrace.ResponseReceived(subj, resp.Data, resp.Header) } } diff --git a/vendor/github.com/nats-io/nats.go/kv.go b/vendor/github.com/nats-io/nats.go/kv.go index 499049193bc..dda644ec732 100644 --- 
a/vendor/github.com/nats-io/nats.go/kv.go +++ b/vendor/github.com/nats-io/nats.go/kv.go @@ -354,7 +354,6 @@ const ( kvSubjectsTmpl = "$KV.%s.>" kvSubjectsPreTmpl = "$KV.%s." kvSubjectsPreDomainTmpl = "%s.$KV.%s." - kvNoPending = "0" ) // Regex for valid keys and buckets. diff --git a/vendor/github.com/nats-io/nats.go/nats.go b/vendor/github.com/nats-io/nats.go/nats.go index 7e8c11b7ea6..c5694697106 100644 --- a/vendor/github.com/nats-io/nats.go/nats.go +++ b/vendor/github.com/nats-io/nats.go/nats.go @@ -48,7 +48,7 @@ import ( // Default Constants const ( - Version = "1.46.1" + Version = "1.48.0" DefaultURL = "nats://127.0.0.1:4222" DefaultPort = 4222 DefaultMaxReconnect = 60 @@ -151,6 +151,7 @@ var ( ErrMaxAccountConnectionsExceeded = errors.New("nats: maximum account active connections exceeded") ErrConnectionNotTLS = errors.New("nats: connection is not tls") ErrMaxSubscriptionsExceeded = errors.New("nats: server maximum subscriptions exceeded") + ErrWebSocketHeadersAlreadySet = errors.New("nats: websocket connection headers already set") ) // GetDefaultOptions returns default configuration options for the client. @@ -250,6 +251,9 @@ type UserInfoCB func() (string, string) // whole list of URLs and failed to reconnect. type ReconnectDelayHandler func(attempts int) time.Duration +// WebSocketHeadersHandler is an optional callback handler for generating token used for WebSocket connections. +type WebSocketHeadersHandler func() (http.Header, error) + // asyncCB is used to preserve order for async callbacks. type asyncCB struct { f func() @@ -524,6 +528,17 @@ type Options struct { // from SubscribeSync if the server returns a permissions error for a subscription. // Defaults to false. PermissionErrOnSubscribe bool + + // WebSocketConnectionHeaders is an optional http request headers to be sent with the WebSocket request. 
+ WebSocketConnectionHeaders http.Header + + // WebSocketConnectionHeadersHandler is an optional callback handler for generating token used for WebSocket connections. + WebSocketConnectionHeadersHandler WebSocketHeadersHandler + + // SkipSubjectValidation will disable publish subject validation. + // NOTE: This is not recommended in general, as the performance gain is minimal + // and may lead to breaking protocol. + SkipSubjectValidation bool } const ( @@ -1472,6 +1487,50 @@ func TLSHandshakeFirst() Option { } } +// WebSocketConnectionHeaders sets a fixed set of HTTP headers that will be +// sent during the WebSocket connection handshake. +// This option is mutually exclusive with WebSocketConnectionHeadersHandler; +// if a headers handler has already been configured, it returns +// ErrWebSocketHeadersAlreadySet. +func WebSocketConnectionHeaders(headers http.Header) Option { + return func(o *Options) error { + if o.WebSocketConnectionHeadersHandler != nil { + return ErrWebSocketHeadersAlreadySet + } + o.WebSocketConnectionHeaders = headers + return nil + } +} + +// WebSocketConnectionHeadersHandler registers a callback used to supply HTTP +// headers for the WebSocket connection handshake. +// This option is mutually exclusive with WebSocketConnectionHeaders; if +// non-empty static headers have already been configured, it returns +// ErrWebSocketHeadersAlreadySet. +func WebSocketConnectionHeadersHandler(cb WebSocketHeadersHandler) Option { + return func(o *Options) error { + if len(o.WebSocketConnectionHeaders) != 0 { + return ErrWebSocketHeadersAlreadySet + } + o.WebSocketConnectionHeadersHandler = cb + return nil + } +} + +// SkipSubjectValidation is an Option to skip subject validation when +// publishing messages. +// By default, subject validation is performed to ensure that subjects +// are valid according to NATS subject syntax (no spaces newlines and tabs). 
+// NOTE: It is not recommended to use this option as the performance gain +// is minimal and disabling subject validation can lead breaking protocol +// rules. +func SkipSubjectValidation() Option { + return func(o *Options) error { + o.SkipSubjectValidation = true + return nil + } +} + // Handler processing // SetDisconnectHandler will set the disconnect event handler. @@ -1671,8 +1730,10 @@ func (o Options) Connect() (*Conn, error) { return nil, err } - if connectionEstablished && nc.Opts.ConnectedCB != nil { - nc.ach.push(func() { nc.Opts.ConnectedCB(nc) }) + if connectionEstablished { + if connectedCB := nc.Opts.ConnectedCB; connectedCB != nil { + nc.ach.push(func() { connectedCB(nc) }) + } } return nc, nil @@ -2747,8 +2808,10 @@ func (nc *Conn) sendConnect() error { // Construct the CONNECT protocol string cProto, err := nc.connectProto() if err != nil { - if !nc.initc && nc.Opts.AsyncErrorCB != nil { - nc.ach.push(func() { nc.Opts.AsyncErrorCB(nc, nil, err) }) + if !nc.initc { + if asyncErrorCB := nc.Opts.AsyncErrorCB; asyncErrorCB != nil { + nc.ach.push(func() { asyncErrorCB(nc, nil, err) }) + } } return err } @@ -2764,8 +2827,10 @@ func (nc *Conn) sendConnect() error { // reading byte-by-byte here is ok. proto, err := nc.readProto() if err != nil { - if !nc.initc && nc.Opts.AsyncErrorCB != nil { - nc.ach.push(func() { nc.Opts.AsyncErrorCB(nc, nil, err) }) + if !nc.initc { + if asyncErrorCB := nc.Opts.AsyncErrorCB; asyncErrorCB != nil { + nc.ach.push(func() { asyncErrorCB(nc, nil, err) }) + } } return err } @@ -2775,8 +2840,10 @@ func (nc *Conn) sendConnect() error { // Read the rest now... 
proto, err = nc.readProto() if err != nil { - if !nc.initc && nc.Opts.AsyncErrorCB != nil { - nc.ach.push(func() { nc.Opts.AsyncErrorCB(nc, nil, err) }) + if !nc.initc { + if asyncErrorCB := nc.Opts.AsyncErrorCB; asyncErrorCB != nil { + nc.ach.push(func() { asyncErrorCB(nc, nil, err) }) + } } return err } @@ -2884,10 +2951,10 @@ func (nc *Conn) doReconnect(err error, forceReconnect bool) { // Perform appropriate callback if needed for a disconnect. // DisconnectedErrCB has priority over deprecated DisconnectedCB if !nc.initc { - if nc.Opts.DisconnectedErrCB != nil { - nc.ach.push(func() { nc.Opts.DisconnectedErrCB(nc, err) }) - } else if nc.Opts.DisconnectedCB != nil { - nc.ach.push(func() { nc.Opts.DisconnectedCB(nc) }) + if disconnectedErrCB := nc.Opts.DisconnectedErrCB; disconnectedErrCB != nil { + nc.ach.push(func() { disconnectedErrCB(nc, err) }) + } else if disconnectedCB := nc.Opts.DisconnectedCB; disconnectedCB != nil { + nc.ach.push(func() { disconnectedCB(nc) }) } } else if nc.Opts.RetryOnFailedConnect && nc.initc && err != nil { // For initial connection failure with RetryOnFailedConnect, @@ -2996,8 +3063,8 @@ func (nc *Conn) doReconnect(err error, forceReconnect bool) { // Continue to hold the lock if err != nil { // Perform appropriate callback for a failed connection attempt. - if nc.Opts.ReconnectErrCB != nil { - nc.ach.push(func() { nc.Opts.ReconnectErrCB(nc, err) }) + if reconnectErrCB := nc.Opts.ReconnectErrCB; reconnectErrCB != nil { + nc.ach.push(func() { reconnectErrCB(nc, err) }) } nc.err = nil continue @@ -3047,10 +3114,10 @@ func (nc *Conn) doReconnect(err error, forceReconnect bool) { // Queue up the correct callback. If we are in initial connect state // (using retry on failed connect), we will call the ConnectedCB, // otherwise the ReconnectedCB. 
- if nc.Opts.ReconnectedCB != nil && !nc.initc { - nc.ach.push(func() { nc.Opts.ReconnectedCB(nc) }) - } else if nc.Opts.ConnectedCB != nil && nc.initc { - nc.ach.push(func() { nc.Opts.ConnectedCB(nc) }) + if reconnectedCB := nc.Opts.ReconnectedCB; reconnectedCB != nil && !nc.initc { + nc.ach.push(func() { reconnectedCB(nc) }) + } else if connectedCB := nc.Opts.ConnectedCB; connectedCB != nil && nc.initc { + nc.ach.push(func() { connectedCB(nc) }) } // If we are here with a retry on failed connect, indicate that the @@ -3364,8 +3431,8 @@ func (nc *Conn) processMsg(data []byte) { // We will pass the message through but send async error. nc.mu.Lock() nc.err = ErrBadHeaderMsg - if nc.Opts.AsyncErrorCB != nil { - nc.ach.push(func() { nc.Opts.AsyncErrorCB(nc, sub, ErrBadHeaderMsg) }) + if asyncErrorCB := nc.Opts.AsyncErrorCB; asyncErrorCB != nil { + nc.ach.push(func() { asyncErrorCB(nc, sub, ErrBadHeaderMsg) }) } nc.mu.Unlock() } @@ -3542,8 +3609,8 @@ slowConsumer: // is already experiencing client-side slow consumer situation. 
nc.mu.Lock() nc.err = ErrSlowConsumer - if nc.Opts.AsyncErrorCB != nil { - nc.ach.push(func() { nc.Opts.AsyncErrorCB(nc, sub, ErrSlowConsumer) }) + if asyncErrorCB := nc.Opts.AsyncErrorCB; asyncErrorCB != nil { + nc.ach.push(func() { asyncErrorCB(nc, sub, ErrSlowConsumer) }) } nc.mu.Unlock() } else { @@ -3586,8 +3653,8 @@ func (nc *Conn) processTransientError(err error) { } } } - if nc.Opts.AsyncErrorCB != nil { - nc.ach.push(func() { nc.Opts.AsyncErrorCB(nc, nil, err) }) + if asyncErrorCB := nc.Opts.AsyncErrorCB; asyncErrorCB != nil { + nc.ach.push(func() { asyncErrorCB(nc, nil, err) }) } nc.mu.Unlock() } @@ -3599,8 +3666,10 @@ func (nc *Conn) processTransientError(err error) { // Connection lock is held on entry func (nc *Conn) processAuthError(err error) bool { nc.err = err - if !nc.initc && nc.Opts.AsyncErrorCB != nil { - nc.ach.push(func() { nc.Opts.AsyncErrorCB(nc, nil, err) }) + if !nc.initc { + if asyncErrorCB := nc.Opts.AsyncErrorCB; asyncErrorCB != nil { + nc.ach.push(func() { asyncErrorCB(nc, nil, err) }) + } } // We should give up if we tried twice on this server and got the // same error. This behavior can be modified using IgnoreAuthErrorAbort. 
@@ -3645,8 +3714,8 @@ func (nc *Conn) flusher() { if nc.err == nil { nc.err = err } - if nc.Opts.AsyncErrorCB != nil { - nc.ach.push(func() { nc.Opts.AsyncErrorCB(nc, nil, err) }) + if asyncErrorCB := nc.Opts.AsyncErrorCB; asyncErrorCB != nil { + nc.ach.push(func() { asyncErrorCB(nc, nil, err) }) } } } @@ -3760,12 +3829,16 @@ func (nc *Conn) processInfo(info string) error { if !nc.Opts.NoRandomize { nc.shufflePool(1) } - if !nc.initc && nc.Opts.DiscoveredServersCB != nil { - nc.ach.push(func() { nc.Opts.DiscoveredServersCB(nc) }) + if !nc.initc { + if discoveredServersCB := nc.Opts.DiscoveredServersCB; discoveredServersCB != nil { + nc.ach.push(func() { discoveredServersCB(nc) }) + } } } - if !nc.initc && ncInfo.LameDuckMode && nc.Opts.LameDuckModeHandler != nil { - nc.ach.push(func() { nc.Opts.LameDuckModeHandler(nc) }) + if !nc.initc && ncInfo.LameDuckMode { + if lameDuckModeHandler := nc.Opts.LameDuckModeHandler; lameDuckModeHandler != nil { + nc.ach.push(func() { lameDuckModeHandler(nc) }) + } } return nil } @@ -3862,7 +3935,7 @@ func (nc *Conn) kickFlusher() { // argument is left untouched and needs to be correctly interpreted on // the receiver. func (nc *Conn) Publish(subj string, data []byte) error { - return nc.publish(subj, _EMPTY_, nil, data) + return nc.publish(subj, _EMPTY_, false, nil, data) } // Header represents the optional Header for a NATS message, @@ -4005,27 +4078,71 @@ func (nc *Conn) PublishMsg(m *Msg) error { if err != nil { return err } - return nc.publish(m.Subject, m.Reply, hdr, m.Data) + validateReply := m.Reply != _EMPTY_ + return nc.publish(m.Subject, m.Reply, validateReply, hdr, m.Data) } // PublishRequest will perform a Publish() expecting a response on the // reply subject. Use Request() for automatically waiting for a response // inline. 
func (nc *Conn) PublishRequest(subj, reply string, data []byte) error { - return nc.publish(subj, reply, nil, data) + return nc.publish(subj, reply, true, nil, data) } // Used for handrolled Itoa const digits = "0123456789" +// validateSubject checks if the subject contains characters that break the NATS protocol. +// Uses an adaptive algorithm: manual loop for short subjects (< 16 chars) and +// SIMD-optimized strings.IndexByte for longer subjects. +func validateSubject(subj string) error { + if subj == "" { + return ErrBadSubject + } + + // Adaptive threshold based on benchmark data showing crossover at ~15-20 characters. + const lengthThreshold = 16 + + if len(subj) < lengthThreshold { + // Fast path for short subjects (< 16 chars) + // Short-circuit on non-control characters. + for i := range len(subj) { + c := subj[i] + if c <= ' ' && (c == ' ' || c == '\t' || c == '\r' || c == '\n') { + return ErrBadSubject + } + } + return nil + } + + // Optimized path for long subjects (>= 16 chars) + // Uses SIMD-optimized strings.IndexByte (processes 16+ bytes per instruction) + if strings.IndexByte(subj, ' ') >= 0 || + strings.IndexByte(subj, '\t') >= 0 || + strings.IndexByte(subj, '\r') >= 0 || + strings.IndexByte(subj, '\n') >= 0 { + return ErrBadSubject + } + return nil +} + // publish is the internal function to publish messages to a nats-server. // Sends a protocol data message by queuing into the bufio writer // and kicking the flush go routine. These writes should be protected. 
-func (nc *Conn) publish(subj, reply string, hdr, data []byte) error { +func (nc *Conn) publish(subj, reply string, validateReply bool, hdr, data []byte) error { if nc == nil { return ErrInvalidConnection } - if subj == "" { + if !nc.Opts.SkipSubjectValidation { + if err := validateSubject(subj); err != nil { + return err + } + if validateReply { + if err := validateSubject(reply); err != nil { + return ErrBadSubject + } + } + } else if subj == _EMPTY_ { return ErrBadSubject } nc.mu.Lock() @@ -4191,7 +4308,7 @@ func (nc *Conn) createNewRequestAndSend(subj string, hdr, data []byte) (chan *Ms } nc.mu.Unlock() - if err := nc.publish(subj, respInbox, hdr, data); err != nil { + if err := nc.publish(subj, respInbox, false, hdr, data); err != nil { return nil, token, err } @@ -4287,7 +4404,7 @@ func (nc *Conn) oldRequest(subj string, hdr, data []byte, timeout time.Duration) s.AutoUnsubscribe(1) defer s.Unsubscribe() - err = nc.publish(subj, inbox, hdr, data) + err = nc.publish(subj, inbox, false, hdr, data) if err != nil { return nil, err } @@ -5607,8 +5724,8 @@ func (nc *Conn) close(status Status, doCBs bool, err error) { nc.ach.push(func() { disconnectedCB(nc) }) } } - if nc.Opts.ClosedCB != nil { - nc.ach.push(func() { nc.Opts.ClosedCB(nc) }) + if closedCB := nc.Opts.ClosedCB; closedCB != nil { + nc.ach.push(func() { closedCB(nc) }) } } // If this is terminal, then we have to notify the asyncCB handler that diff --git a/vendor/github.com/nats-io/nats.go/ws.go b/vendor/github.com/nats-io/nats.go/ws.go index 46d8300dec4..36429179394 100644 --- a/vendor/github.com/nats-io/nats.go/ws.go +++ b/vendor/github.com/nats-io/nats.go/ws.go @@ -610,6 +610,9 @@ func (nc *Conn) wsInitHandshake(u *url.URL) error { if compress { req.Header.Add("Sec-WebSocket-Extensions", wsPMCReqHeaderValue) } + if err := nc.wsUpdateConnectionHeaders(req); err != nil { + return err + } if err := req.Write(nc.conn); err != nil { return err } @@ -728,6 +731,25 @@ func (nc *Conn) 
wsEnqueueControlMsg(needsLock bool, frameType wsOpCode, payload nc.bw.flush() } +func (nc *Conn) wsUpdateConnectionHeaders(req *http.Request) error { + var headers http.Header + var err error + if nc.Opts.WebSocketConnectionHeadersHandler != nil { + headers, err = nc.Opts.WebSocketConnectionHeadersHandler() + if err != nil { + return err + } + } else { + headers = nc.Opts.WebSocketConnectionHeaders + } + for key, values := range headers { + for _, val := range values { + req.Header.Add(key, val) + } + } + return nil +} + func wsPMCExtensionSupport(header http.Header) (bool, bool) { for _, extensionList := range header["Sec-Websocket-Extensions"] { extensions := strings.Split(extensionList, ",") diff --git a/vendor/github.com/oklog/run/LICENSE b/vendor/github.com/oklog/run/LICENSE index 261eeb9e9f8..374773d07d1 100644 --- a/vendor/github.com/oklog/run/LICENSE +++ b/vendor/github.com/oklog/run/LICENSE @@ -186,7 +186,7 @@ same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright [yyyy] [name of copyright owner] + Copyright 2017 Peter Bourgon Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/vendor/github.com/oklog/run/README.md b/vendor/github.com/oklog/run/README.md index eba7d11cf3a..18a10a3d4e7 100644 --- a/vendor/github.com/oklog/run/README.md +++ b/vendor/github.com/oklog/run/README.md @@ -1,7 +1,7 @@ # run -[![GoDoc](https://godoc.org/github.com/oklog/run?status.svg)](https://godoc.org/github.com/oklog/run) -[![Build Status](https://img.shields.io/endpoint.svg?url=https%3A%2F%2Factions-badge.atrox.dev%2Foklog%2Frun%2Fbadge&style=flat-square&label=build)](https://github.com/oklog/run/actions?query=workflow%3ATest) +[![GoDoc](https://godoc.org/github.com/oklog/run?status.svg)](https://godoc.org/github.com/oklog/run) +[![test](https://github.com/oklog/run/actions/workflows/test.yaml/badge.svg?branch=main&event=push)](https://github.com/oklog/run/actions/workflows/test.yaml) [![Go Report Card](https://goreportcard.com/badge/github.com/oklog/run)](https://goreportcard.com/report/github.com/oklog/run) [![Apache 2 licensed](https://img.shields.io/badge/license-Apache2-blue.svg)](https://raw.githubusercontent.com/oklog/run/master/LICENSE) @@ -16,8 +16,8 @@ finally returns control to the caller only once all actors have returned. This general-purpose API allows callers to model pretty much any runnable task, and achieve well-defined lifecycle semantics for the group. -run.Group was written to manage component lifecycles in func main for -[OK Log](https://github.com/oklog/oklog). +run.Group was written to manage component lifecycles in func main for +[OK Log](https://github.com/oklog/oklog). But it's useful in any circumstance where you need to orchestrate multiple goroutines as a unit whole. 
[Click here](https://www.youtube.com/watch?v=LHe1Cb_Ud_M&t=15m45s) to see a @@ -62,14 +62,30 @@ g.Add(func() error { }) ``` +### http.Server graceful Shutdown + +```go +httpServer := &http.Server{ + Addr: "localhost:8080", + Handler: ..., +} +g.Add(func() error { + return httpServer.ListenAndServe() +}, func(error) { + ctx, cancel := context.WithTimeout(context.TODO(), 3*time.Second) + defer cancel() + httpServer.Shutdown(ctx) +}) +``` + ## Comparisons -Package run is somewhat similar to package -[errgroup](https://godoc.org/golang.org/x/sync/errgroup), +Package run is somewhat similar to package +[errgroup](https://godoc.org/golang.org/x/sync/errgroup), except it doesn't require actor goroutines to understand context semantics. It's somewhat similar to package -[tomb.v1](https://godoc.org/gopkg.in/tomb.v1) or +[tomb.v1](https://godoc.org/gopkg.in/tomb.v1) or [tomb.v2](https://godoc.org/gopkg.in/tomb.v2), -except it has a much smaller API surface, delegating e.g. staged shutdown of +except it has a much smaller API surface, delegating e.g. staged shutdown of goroutines to the caller. diff --git a/vendor/github.com/oklog/run/actors.go b/vendor/github.com/oklog/run/actors.go index ef93495d3f0..ad6aed8664f 100644 --- a/vendor/github.com/oklog/run/actors.go +++ b/vendor/github.com/oklog/run/actors.go @@ -2,22 +2,41 @@ package run import ( "context" + "errors" "fmt" "os" "os/signal" ) +// ContextHandler returns an actor, i.e. an execute and interrupt func, that +// terminates when the provided context is canceled. +func ContextHandler(ctx context.Context) (execute func() error, interrupt func(error)) { + ctx, cancel := context.WithCancel(ctx) + return func() error { + <-ctx.Done() + return ctx.Err() + }, func(error) { + cancel() + } +} + // SignalHandler returns an actor, i.e. an execute and interrupt func, that -// terminates with SignalError when the process receives one of the provided -// signals, or the parent context is canceled. 
+// terminates with ErrSignal when the process receives one of the provided +// signals, or with ctx.Error() when the parent context is canceled. If no +// signals are provided, the actor will terminate on any signal, per +// [signal.Notify]. func SignalHandler(ctx context.Context, signals ...os.Signal) (execute func() error, interrupt func(error)) { ctx, cancel := context.WithCancel(ctx) return func() error { - c := make(chan os.Signal, 1) - signal.Notify(c, signals...) + testc := getTestSigChan(ctx) + sigc := make(chan os.Signal, 1) + signal.Notify(sigc, signals...) + defer signal.Stop(sigc) select { - case sig := <-c: - return SignalError{Signal: sig} + case sig := <-testc: + return &SignalError{Signal: sig} + case sig := <-sigc: + return &SignalError{Signal: sig} case <-ctx.Done(): return ctx.Err() } @@ -26,13 +45,52 @@ func SignalHandler(ctx context.Context, signals ...os.Signal) (execute func() er } } -// SignalError is returned by the signal handler's execute function -// when it terminates due to a received signal. +type testSigChanKey struct{} + +func getTestSigChan(ctx context.Context) <-chan os.Signal { + c, _ := ctx.Value(testSigChanKey{}).(<-chan os.Signal) // can be nil + return c +} + +func putTestSigChan(ctx context.Context, c <-chan os.Signal) context.Context { + return context.WithValue(ctx, testSigChanKey{}, c) +} + +// SignalError is returned by the signal handler's execute function when it +// terminates due to a received signal. +// +// SignalError has a design error that impacts comparison with errors.As. +// Callers should prefer using errors.Is(err, ErrSignal) to check for signal +// errors, and should only use errors.As in the rare case that they need to +// program against the specific os.Signal value. type SignalError struct { Signal os.Signal } // Error implements the error interface. +// +// It was a design error to define this method on a value receiver rather than a +// pointer receiver. For compatibility reasons it won't be changed. 
func (e SignalError) Error() string { return fmt.Sprintf("received signal %s", e.Signal) } + +// Is addresses a design error in the SignalError type, so that errors.Is with +// ErrSignal will return true. +func (e SignalError) Is(err error) bool { + return errors.Is(err, ErrSignal) +} + +// As fixes a design error in the SignalError type, so that errors.As with the +// literal `&SignalError{}` will return true. +func (e SignalError) As(target interface{}) bool { + switch target.(type) { + case *SignalError, SignalError: + return true + default: + return false + } +} + +// ErrSignal is returned by SignalHandler when a signal triggers termination. +var ErrSignal = errors.New("signal error") diff --git a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md index 09217941147..69be62380db 100644 --- a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md +++ b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md @@ -1,3 +1,14 @@ +## 2.27.4 + +### Fixes +- CurrentTreeConstructionNodeReport: fix for nested container nodes [59bc751] + +## 2.27.3 + +### Fixes +report exit result in case of failure [1c9f356] +fix data race [ece19c8] + ## 2.27.2 ### Fixes diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/run.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/run.go index 30d8096cd62..48c69a1d835 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/run.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/run.go @@ -9,6 +9,7 @@ import ( "path/filepath" "regexp" "strings" + "sync/atomic" "syscall" "time" @@ -159,12 +160,15 @@ func runSerial(suite TestSuite, ginkgoConfig types.SuiteConfig, reporterConfig t func runParallel(suite TestSuite, ginkgoConfig types.SuiteConfig, reporterConfig types.ReporterConfig, cliConfig types.CLIConfig, goFlagsConfig types.GoFlagsConfig, additionalArgs []string) TestSuite { type procResult struct { + proc int + exitResult string passed bool hasProgrammaticFocus bool } numProcs := 
cliConfig.ComputedProcs() procOutput := make([]*bytes.Buffer, numProcs) + procExitResult := make([]string, numProcs) coverProfiles := []string{} blockProfiles := []string{} @@ -224,16 +228,20 @@ func runParallel(suite TestSuite, ginkgoConfig types.SuiteConfig, reporterConfig args = append(args, additionalArgs...) cmd, buf := buildAndStartCommand(suite, args, false) + var exited atomic.Bool procOutput[proc-1] = buf - server.RegisterAlive(proc, func() bool { return cmd.ProcessState == nil || !cmd.ProcessState.Exited() }) + server.RegisterAlive(proc, func() bool { return !exited.Load() }) go func() { cmd.Wait() exitStatus := cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus() procResults <- procResult{ + proc: proc, + exitResult: cmd.ProcessState.String(), passed: (exitStatus == 0) || (exitStatus == types.GINKGO_FOCUS_EXIT_CODE), hasProgrammaticFocus: exitStatus == types.GINKGO_FOCUS_EXIT_CODE, } + exited.Store(true) }() } @@ -242,6 +250,7 @@ func runParallel(suite TestSuite, ginkgoConfig types.SuiteConfig, reporterConfig result := <-procResults passed = passed && result.passed suite.HasProgrammaticFocus = suite.HasProgrammaticFocus || result.hasProgrammaticFocus + procExitResult[result.proc-1] = result.exitResult } if passed { suite.State = TestSuiteStatePassed @@ -261,6 +270,8 @@ func runParallel(suite TestSuite, ginkgoConfig types.SuiteConfig, reporterConfig for proc := 1; proc <= cliConfig.ComputedProcs(); proc++ { fmt.Fprintf(formatter.ColorableStdErr, formatter.F("{{bold}}Output from proc %d:{{/}}\n", proc)) fmt.Fprintln(os.Stderr, formatter.Fi(1, "%s", procOutput[proc-1].String())) + fmt.Fprintf(formatter.ColorableStdErr, formatter.F("{{bold}}Exit result of proc %d:{{/}}\n", proc)) + fmt.Fprintln(os.Stderr, formatter.Fi(1, "%s\n", procExitResult[proc-1])) } fmt.Fprintf(os.Stderr, "** End **") } diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/suite.go b/vendor/github.com/onsi/ginkgo/v2/internal/suite.go index ef76cd099e6..9d5f590011e 100644 --- 
a/vendor/github.com/onsi/ginkgo/v2/internal/suite.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/suite.go @@ -208,9 +208,12 @@ func (suite *Suite) PushNode(node Node) error { // Ensure that code running in the body of the container node // has access to information about the current container node(s). + // The current one (nil in top-level container nodes, non-nil in an + // embedded container node) gets restored when the node is done. + oldConstructionNodeReport := suite.currentConstructionNodeReport suite.currentConstructionNodeReport = constructionNodeReportForTreeNode(suite.tree) defer func() { - suite.currentConstructionNodeReport = nil + suite.currentConstructionNodeReport = oldConstructionNodeReport }() node.Body(nil) diff --git a/vendor/github.com/onsi/ginkgo/v2/types/version.go b/vendor/github.com/onsi/ginkgo/v2/types/version.go index b9c1ea9856a..66cbbcf3c3a 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/version.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/version.go @@ -1,3 +1,3 @@ package types -const VERSION = "2.27.2" +const VERSION = "2.27.4" diff --git a/vendor/github.com/onsi/gomega/CHANGELOG.md b/vendor/github.com/onsi/gomega/CHANGELOG.md index b7d7309f3f2..64b33e8b7cc 100644 --- a/vendor/github.com/onsi/gomega/CHANGELOG.md +++ b/vendor/github.com/onsi/gomega/CHANGELOG.md @@ -1,3 +1,8 @@ +## 1.38.3 + +### Fixes +make string formatitng more consistent for users who use format.Object directly + ## 1.38.2 - roll back to go 1.23.0 [c404969] diff --git a/vendor/github.com/onsi/gomega/format/format.go b/vendor/github.com/onsi/gomega/format/format.go index 96f04b21045..6c23ba338bc 100644 --- a/vendor/github.com/onsi/gomega/format/format.go +++ b/vendor/github.com/onsi/gomega/format/format.go @@ -262,7 +262,7 @@ func Object(object any, indentation uint) string { if err, ok := object.(error); ok && !isNilValue(value) { // isNilValue check needed here to avoid nil deref due to boxed nil commonRepresentation += "\n" + IndentString(err.Error(), 
indentation) + "\n" + indent } - return fmt.Sprintf("%s<%s>: %s%s", indent, formatType(value), commonRepresentation, formatValue(value, indentation)) + return fmt.Sprintf("%s<%s>: %s%s", indent, formatType(value), commonRepresentation, formatValue(value, indentation, true)) } /* @@ -306,7 +306,7 @@ func formatType(v reflect.Value) string { } } -func formatValue(value reflect.Value, indentation uint) string { +func formatValue(value reflect.Value, indentation uint, isTopLevel bool) string { if indentation > MaxDepth { return "..." } @@ -367,11 +367,11 @@ func formatValue(value reflect.Value, indentation uint) string { case reflect.Func: return fmt.Sprintf("0x%x", value.Pointer()) case reflect.Ptr: - return formatValue(value.Elem(), indentation) + return formatValue(value.Elem(), indentation, isTopLevel) case reflect.Slice: return truncateLongStrings(formatSlice(value, indentation)) case reflect.String: - return truncateLongStrings(formatString(value.String(), indentation)) + return truncateLongStrings(formatString(value.String(), indentation, isTopLevel)) case reflect.Array: return truncateLongStrings(formatSlice(value, indentation)) case reflect.Map: @@ -392,8 +392,8 @@ func formatValue(value reflect.Value, indentation uint) string { } } -func formatString(object any, indentation uint) string { - if indentation == 1 { +func formatString(object any, indentation uint, isTopLevel bool) string { + if isTopLevel { s := fmt.Sprintf("%s", object) components := strings.Split(s, "\n") result := "" @@ -416,14 +416,14 @@ func formatString(object any, indentation uint) string { func formatSlice(v reflect.Value, indentation uint) string { if v.Kind() == reflect.Slice && v.Type().Elem().Kind() == reflect.Uint8 && isPrintableString(string(v.Bytes())) { - return formatString(v.Bytes(), indentation) + return formatString(v.Bytes(), indentation, false) } l := v.Len() result := make([]string, l) longest := 0 - for i := 0; i < l; i++ { - result[i] = formatValue(v.Index(i), 
indentation+1) + for i := range l { + result[i] = formatValue(v.Index(i), indentation+1, false) if len(result[i]) > longest { longest = len(result[i]) } @@ -443,7 +443,7 @@ func formatMap(v reflect.Value, indentation uint) string { longest := 0 for i, key := range v.MapKeys() { value := v.MapIndex(key) - result[i] = fmt.Sprintf("%s: %s", formatValue(key, indentation+1), formatValue(value, indentation+1)) + result[i] = fmt.Sprintf("%s: %s", formatValue(key, indentation+1, false), formatValue(value, indentation+1, false)) if len(result[i]) > longest { longest = len(result[i]) } @@ -462,10 +462,10 @@ func formatStruct(v reflect.Value, indentation uint) string { l := v.NumField() result := []string{} longest := 0 - for i := 0; i < l; i++ { + for i := range l { structField := t.Field(i) fieldEntry := v.Field(i) - representation := fmt.Sprintf("%s: %s", structField.Name, formatValue(fieldEntry, indentation+1)) + representation := fmt.Sprintf("%s: %s", structField.Name, formatValue(fieldEntry, indentation+1, false)) result = append(result, representation) if len(representation) > longest { longest = len(representation) @@ -479,7 +479,7 @@ func formatStruct(v reflect.Value, indentation uint) string { } func formatInterface(v reflect.Value, indentation uint) string { - return fmt.Sprintf("<%s>%s", formatType(v.Elem()), formatValue(v.Elem(), indentation)) + return fmt.Sprintf("<%s>%s", formatType(v.Elem()), formatValue(v.Elem(), indentation, false)) } func isNilValue(a reflect.Value) bool { diff --git a/vendor/github.com/onsi/gomega/gomega_dsl.go b/vendor/github.com/onsi/gomega/gomega_dsl.go index fdba34ee9dd..55c0e895e2f 100644 --- a/vendor/github.com/onsi/gomega/gomega_dsl.go +++ b/vendor/github.com/onsi/gomega/gomega_dsl.go @@ -22,7 +22,7 @@ import ( "github.com/onsi/gomega/types" ) -const GOMEGA_VERSION = "1.38.2" +const GOMEGA_VERSION = "1.38.3" const nilGomegaPanic = `You are trying to make an assertion, but haven't registered Gomega's fail handler. 
If you're using Ginkgo then you probably forgot to put your assertion in an It(). diff --git a/vendor/github.com/onsi/gomega/matchers.go b/vendor/github.com/onsi/gomega/matchers.go index 10b6693fd63..40ba15c5e7f 100644 --- a/vendor/github.com/onsi/gomega/matchers.go +++ b/vendor/github.com/onsi/gomega/matchers.go @@ -515,8 +515,8 @@ func HaveExistingField(field string) types.GomegaMatcher { // and even interface values. // // actual := 42 -// Expect(actual).To(HaveValue(42)) -// Expect(&actual).To(HaveValue(42)) +// Expect(actual).To(HaveValue(Equal(42))) +// Expect(&actual).To(HaveValue(Equal(42))) func HaveValue(matcher types.GomegaMatcher) types.GomegaMatcher { return &matchers.HaveValueMatcher{ Matcher: matcher, diff --git a/vendor/github.com/onsi/gomega/matchers/have_key_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_key_matcher.go index 9e16dcf5d6c..16630c18e34 100644 --- a/vendor/github.com/onsi/gomega/matchers/have_key_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/have_key_matcher.go @@ -39,7 +39,7 @@ func (matcher *HaveKeyMatcher) Match(actual any) (success bool, err error) { } keys := reflect.ValueOf(actual).MapKeys() - for i := 0; i < len(keys); i++ { + for i := range keys { success, err := keyMatcher.Match(keys[i].Interface()) if err != nil { return false, fmt.Errorf("HaveKey's key matcher failed with:\n%s%s", format.Indent, err.Error()) diff --git a/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go index 1c53f1e56af..0cd7081532e 100644 --- a/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go @@ -52,7 +52,7 @@ func (matcher *HaveKeyWithValueMatcher) Match(actual any) (success bool, err err } keys := reflect.ValueOf(actual).MapKeys() - for i := 0; i < len(keys); i++ { + for i := range keys { success, err := keyMatcher.Match(keys[i].Interface()) if err 
!= nil { return false, fmt.Errorf("HaveKeyWithValue's key matcher failed with:\n%s%s", format.Indent, err.Error()) diff --git a/vendor/github.com/onsi/gomega/matchers/support/goraph/edge/edge.go b/vendor/github.com/onsi/gomega/matchers/support/goraph/edge/edge.go index 8c38411b283..72edba20f71 100644 --- a/vendor/github.com/onsi/gomega/matchers/support/goraph/edge/edge.go +++ b/vendor/github.com/onsi/gomega/matchers/support/goraph/edge/edge.go @@ -1,6 +1,9 @@ package edge -import . "github.com/onsi/gomega/matchers/support/goraph/node" +import ( + . "github.com/onsi/gomega/matchers/support/goraph/node" + "slices" +) type Edge struct { Node1 int @@ -20,13 +23,7 @@ func (ec EdgeSet) Free(node Node) bool { } func (ec EdgeSet) Contains(edge Edge) bool { - for _, e := range ec { - if e == edge { - return true - } - } - - return false + return slices.Contains(ec, edge) } func (ec EdgeSet) FindByNodes(node1, node2 Node) (Edge, bool) { diff --git a/vendor/github.com/owncloud/reva/v2/internal/grpc/services/gateway/usershareprovider.go b/vendor/github.com/owncloud/reva/v2/internal/grpc/services/gateway/usershareprovider.go index 64a70b38ffe..086bde3a735 100644 --- a/vendor/github.com/owncloud/reva/v2/internal/grpc/services/gateway/usershareprovider.go +++ b/vendor/github.com/owncloud/reva/v2/internal/grpc/services/gateway/usershareprovider.go @@ -803,7 +803,9 @@ func isSpaceManagerRemaining(grants []*provider.Grant, grantee *provider.Grantee // RemoveGrant is currently the way to check for the manager role // If it is not set than the current grant is not for a manager and // we can just continue with the next one. - if g.Permissions.RemoveGrant && !isEqualGrantee(g.Grantee, grantee) { + // Expirable grants won't be computed because the space needs + // at least one permanent space manager. 
+ if g.Permissions.RemoveGrant && !isEqualGrantee(g.Grantee, grantee) && g.Expiration == nil { return true } } diff --git a/vendor/github.com/owncloud/reva/v2/pkg/storage/utils/decomposedfs/spaces.go b/vendor/github.com/owncloud/reva/v2/pkg/storage/utils/decomposedfs/spaces.go index e49502a200e..1abf6376925 100644 --- a/vendor/github.com/owncloud/reva/v2/pkg/storage/utils/decomposedfs/spaces.go +++ b/vendor/github.com/owncloud/reva/v2/pkg/storage/utils/decomposedfs/spaces.go @@ -676,6 +676,13 @@ func (fs *Decomposedfs) UpdateStorageSpace(ctx context.Context, req *provider.Up }, nil } } + + // capture old image id to delete it after successful update + var oldImageID string + if v, e := spaceNode.XattrString(ctx, prefixes.SpaceImageAttr); e == nil { + oldImageID = v + } + metadata[prefixes.TreeMTimeAttr] = []byte(time.Now().UTC().Format(time.RFC3339Nano)) err = spaceNode.SetXattrsWithContext(ctx, metadata, true) @@ -683,6 +690,21 @@ func (fs *Decomposedfs) UpdateStorageSpace(ctx context.Context, req *provider.Up return nil, err } + // housekeeping: if the space image is being updated, remove the old one + if newImageID, ok := metadata[prefixes.SpaceImageAttr]; ok { + if oldImageID != "" && oldImageID != string(newImageID) { + delRef := &provider.Reference{ + ResourceId: &provider.ResourceId{ + SpaceId: spaceID, + OpaqueId: oldImageID, + }, + } + // delete old image after new image was successfully set + _ = fs.Delete(ctx, delRef) + // silently ignore failed deletion + } + } + if restore { if err := spaceNode.SetDTime(ctx, nil); err != nil { return nil, err diff --git a/vendor/github.com/prometheus/alertmanager/asset/asset.go b/vendor/github.com/prometheus/alertmanager/asset/asset.go index b4959299b1d..11d3736275a 100644 --- a/vendor/github.com/prometheus/alertmanager/asset/asset.go +++ b/vendor/github.com/prometheus/alertmanager/asset/asset.go @@ -12,7 +12,6 @@ // limitations under the License. 
//go:build dev -// +build dev package asset diff --git a/vendor/github.com/prometheus/alertmanager/asset/assets_vfsdata.go b/vendor/github.com/prometheus/alertmanager/asset/assets_vfsdata.go index 3dd06e052bd..85ce36dd3b6 100644 --- a/vendor/github.com/prometheus/alertmanager/asset/assets_vfsdata.go +++ b/vendor/github.com/prometheus/alertmanager/asset/assets_vfsdata.go @@ -150,9 +150,9 @@ var Assets = func() http.FileSystem { "/static/script.js": &vfsgen۰CompressedFileInfo{ name: "script.js", modTime: time.Date(1970, 1, 1, 0, 0, 1, 0, time.UTC), - uncompressedSize: 110586, + uncompressedSize: 111077, - compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff\xdc\xbd\x0b\x7b\xdb\xb8\xae\x28\xfa\x57\x1c\xed\x6c\x2f\x72\x0c\xab\x76\x5e\x6d\xe5\x70\xf9\xa6\xef\xf7\x23\x49\x3b\x99\x66\x72\x72\x28\x99\x76\xd4\xd8\x94\x4b\x51\x71\xd2\xd8\xfb\x6f\xdc\x1f\x74\xff\xd8\xfd\x08\xea\xed\xc7\x74\xf6\x5e\xfb\x9c\x73\xef\x5a\xf3\xa5\xb2\x04\x82\x20\x09\x82\x00\x08\x82\x5b\xc3\x44\x06\x3a\x8c\x24\x91\xf4\xde\x49\x62\xd1\x88\xb5\x0a\x03\xed\xf4\xb2\x0f\x0d\x45\x24\x28\xd0\xf4\x5e\x09\x9d\x28\xd9\xd0\x2e\x67\x12\xb4\x3b\x64\x0a\xf4\x22\x07\x1b\x93\x02\x44\x91\x1d\xd0\x90\xa3\x56\xf9\x87\x72\x6d\x19\x3a\xa2\x40\xd2\xc5\x82\x16\xa8\x06\x44\x94\x50\xed\x82\x28\x50\xe9\x65\x54\x1b\xb1\x0b\xa2\xc1\xe2\x2f\x57\xa0\x49\x52\xaa\x60\x0f\x92\xa2\x02\xb1\x8c\xed\xef\xd6\x99\x10\x01\x79\xad\xe5\x6a\x03\xc2\x4b\xd5\xee\x03\x2f\xaa\x4d\x96\x11\xfe\x0b\x28\xe1\x24\x81\x32\x2d\x65\x62\x04\x09\x4a\xc4\x1c\x40\x50\x10\xc3\x97\x71\xfe\xf7\xd0\x17\x10\x0e\x35\x0a\xcb\x24\x26\x24\x2a\x91\xf8\x10\xa2\x82\xc4\x60\x19\xed\xff\x32\xaa\x23\x12\xc0\x32\xdd\x65\xc2\x39\x09\x4b\x84\x3f\x82\xb0\x20\x3c\x5a\xc6\xfc\xbf\xb3\x2d\x21\x89\x60\x65\x6b\xca\xcd\x89\xc8\xb0\xd4\x9c\xc7\x30\x2c\x9a\x13\x2e\x23\xff\x3f\xac\x85\x43\x12\xc2\xba\x36\x96\x1b\x39\xab\x89\xb9\x1d\xc6\x98\x74\x79\x5f\xba\x43\x62\xde\x7b\xa6\x1e\xa2\x4b\x25\xee\x6c\x09\x28\x88\xdc\xad\x96\x01\x91\x97\x22\xa2\x54\xf0\x36\x2b\x08\x45\x93\x
f7\xea\x45\x21\x29\x17\x26\x49\xa9\xfc\x4d\x51\x1e\x8a\x7e\xdc\x5f\xc6\x00\xbc\x86\x83\xf0\x12\x9a\x61\x19\x0d\x14\x63\x74\xb0\x0a\x11\x04\xcb\xa8\x48\x50\xc2\xe6\x57\xb1\x41\xc1\x06\x0f\x57\xe3\x83\x68\x25\x46\x12\x95\x90\xc6\x75\xa4\x50\x30\xdc\xa3\x75\x68\x21\x5c\x87\x98\x84\x74\x71\xc3\x55\x23\x64\x03\x52\xb0\x8a\x1d\xf6\x61\xa4\x88\xf9\x26\xd8\x91\x52\xfc\x8e\x48\x0a\x09\xeb\xf4\x92\x43\xd9\x4b\x5a\x2d\x2a\xce\x93\x0b\xa6\x89\x6a\x25\xb4\x97\xad\x2d\x0b\x0a\xdb\x6c\x5c\xc1\x54\xe0\xd1\x05\x1e\xc1\x3a\x3d\x71\x28\x9b\x4d\xe5\xfa\x3d\xd1\x6a\x51\x7d\x2e\x2e\x98\x72\x39\x28\x66\x5e\xe5\x0b\xeb\x58\xc8\x91\xbe\x62\x02\x2e\xcd\xb2\x45\x17\x14\x26\x8c\xd4\x2b\xc8\x26\xe2\xb9\xbc\x58\x50\xd8\xd4\x90\x0c\x21\x24\x29\x2d\x82\x02\x67\x9d\x1e\x3f\x14\x3d\xde\x6a\xd1\xe4\x9c\x5f\x30\x7d\xce\x2f\x32\x0a\x92\x73\x79\xc1\x14\x24\x0b\x0a\xeb\x9b\xa5\x32\xac\x59\x4f\xe9\x56\x37\xeb\x2b\x5d\xf4\x95\x3a\x4f\x72\xbc\xe2\x5c\x5f\x30\x09\xe2\xd7\xe9\x35\xc8\x04\x22\x53\xcc\xcc\x4c\x7d\x9e\x5c\x80\xca\xbb\x5e\xfd\x22\xa6\x76\xb7\xd7\x39\x64\xa2\x27\xda\xed\x1c\x91\xa8\x21\xa2\xbd\xbf\xd3\xd6\x15\x2d\x95\xc4\xb4\xb5\xc2\x16\xff\xe5\x51\x41\x69\xd4\xe2\x60\x46\x27\xc7\x9c\xac\xc4\x9c\x73\x4f\x3c\x0e\x03\x81\x2d\xf8\x0b\x0a\x54\x41\x81\x6c\x0b\xe0\x19\x1d\x2d\x92\xe4\xd4\x1d\x26\xfd\xec\xd1\x4b\x28\x85\x80\x75\x7a\xc1\xa1\xe8\x05\xad\x16\xe5\xe7\x81\x19\xdf\xe0\xa2\x67\x70\xda\x2f\x49\xf6\xa5\x25\x0c\x4f\x05\xf9\xd8\xf3\x15\xcc\x54\x1a\xc5\x35\x9f\x82\x48\xc6\xd1\x58\xb8\xe3\x68\x44\x64\xcb\xf1\x1a\x4e\xeb\x8a\x50\x0a\x6a\x41\x0b\xcd\xf4\x8a\x64\xf0\xce\x61\x28\xb5\x50\x92\x8f\xe3\x7f\x3a\x85\xf8\x98\x9a\x75\x40\x5f\xa9\x68\xd6\x78\xae\x54\xa4\x88\x73\xa5\xf5\x34\xf6\x1e\x3c\x18\x85\xfa\x2a\xf1\xdd\x20\x9a\x3c\x10\xe3\xc9\x83\x20\x52\xe2\x81\x3f\x8e\xfc\x07\x5d\xb7\xe3\x76\x1e\x5c\x85\x52\xc7\x0f\x9c\x96\x6c\x39\xee\x64\xe0\x94\x24\xd2\xa8\xc6\x22\x20\xd8\xf9\x05\x24\xec\x1a\xbb\xb9\x03\x82\xf6\x92\x66\x93\x68\x26\xdc\x69\x34\x25\x94\xf6\xcc\x37\xed\x72\xd0\xae\x8f\xdf\x4b\xc3\x99\x63\x
bd\x2e\x96\x92\x70\x48\xba\x9d\xce\xa1\xa6\x19\x3b\xb9\xd3\x24\xbe\x22\x97\x58\x2f\x85\xad\x4e\x2f\x1c\x12\xc9\x18\x53\x29\x84\x7d\xe3\x44\xfe\x77\x11\x68\x67\x8b\xe9\xbb\xa9\x88\x86\x0d\x39\x9f\xcb\x64\x3c\x36\x42\x32\x7f\xca\x8a\x38\x59\xc5\x0e\xcb\xc1\x9b\xcd\x29\xd9\xa7\xb0\xd5\xed\x65\x6d\x4b\x1a\xa1\x6c\x48\x77\xfb\xb0\xd3\x6c\x12\xc9\x7c\x6d\x64\x99\x32\xff\x1a\x42\x24\x0d\x87\x64\xeb\x9a\x48\x9c\x99\xe6\x8f\x6e\x75\x4d\xf3\x52\xaa\xba\xbd\x8c\x3c\x14\xb9\x47\x6c\x4c\x46\x14\x4e\xd8\xea\x01\xdf\x1a\xa5\x8c\x5b\x8c\xee\x69\xc6\xb8\x2b\x5b\x97\x75\x0f\xf6\x44\xbf\xe3\xc9\x43\xd5\x6f\x77\xbd\xae\xe9\x8b\x2d\xe9\x6e\xa7\xdf\x89\x66\xa7\x44\x1a\x49\xeb\x72\x4a\xe7\xf3\xf4\xb7\x0f\xca\xf5\x29\xed\x6b\xcf\xfc\x0a\x40\xb9\x01\xc5\x76\xf7\xa4\xeb\xa3\x9c\x6e\x36\xb7\xaa\x65\x7b\x92\x61\x39\x23\xb1\xf3\x21\xd4\xf3\xb9\xc1\xd6\xef\x7a\xca\xf5\x4d\xfd\x1d\xbb\xc0\x3c\x5f\xd3\x4c\xdb\x28\x7a\xd8\x59\x50\xf8\xb6\x56\xb4\xa7\x40\xdd\xf5\xd3\xa3\x73\x78\x9a\x4d\xf4\xb5\x10\x2c\x03\xa1\xf0\x7d\x99\x9c\xf4\x6b\x2e\x07\x0f\x3b\xfd\x1b\xed\xa9\xfe\x40\x7b\x63\xbd\xa0\xf0\x94\x75\x8a\xa1\xb8\x2c\xa3\xbe\xe7\x9e\x04\xdf\x53\x8b\x82\x7d\xdf\x54\x65\x51\x06\x01\x81\xa7\x4b\x50\xef\x2c\x96\x6c\xda\xdc\x2f\xb0\xc7\x35\x72\x19\xb5\x4b\xc4\xb9\xbe\x28\x5e\x2a\xfb\x52\x99\x97\xb9\x6c\x35\xa5\x3f\xb0\x31\x79\x55\xe2\x94\x57\x16\xb1\xe1\x13\x63\xbd\xca\x51\x89\xad\x73\x3e\x69\xa9\x94\x35\xfc\xec\x95\xea\x59\x21\xff\xda\x0e\x72\x3a\xc2\xbd\x92\xa4\x36\xdc\x60\xdf\x52\xc1\x84\xeb\x17\xa0\xd9\xf8\x23\x3d\x67\xec\x7e\xdb\xeb\x2c\x0a\x82\x5e\x57\xfa\x6b\xdb\xeb\x42\xde\x67\x06\xfe\x23\x1b\x93\xd7\x25\xfa\x8f\x8d\x98\xca\xaa\x55\xec\x0c\x34\x93\xa9\xec\xed\xe9\x76\xbb\x47\x95\xa9\xf8\x5c\x57\x17\xae\xbc\xf8\xd7\x6a\xf1\xf3\x8b\x12\xd9\xca\x4a\x0f\xe9\xf2\x52\x49\x03\xf7\x73\xa3\x0a\x74\x7e\xd1\xc3\x39\xa0\x8d\x76\x62\x38\x1e\x34\xd3\xa6\x17\x2c\x3a\x5c\x9d\x50\xa4\xf1\x42\x9c\x1d\x1b\x1d\x97\xc2\x33\x46\x74\x0d\xb3\x11\x6b\xb9\x50\x29\xe3\x6e\x36\x45\xb5\x02\xc0\x6e\xa6\x89\xad\xe6\xae\xa8\x06\x
44\xb5\xaa\xc4\x54\x15\x2c\xd5\x63\x14\xea\xac\x26\xbe\x54\x53\xb3\x99\xac\xaa\x0e\x12\x96\xb8\x3e\xe5\xb6\xd2\xdb\x6a\xa5\x90\x54\x2b\xe6\xa6\x62\xb1\xa2\x62\xa3\x89\x67\x55\x07\xab\xab\x6e\x36\xf9\xfa\xfa\x81\x33\xee\xfa\x34\xb0\x54\xdc\x2c\x53\x01\xbc\x4a\x49\x50\x9b\xfd\x1a\x0a\xa3\xe7\x98\x18\xa6\x70\xe3\x48\xe9\x75\x22\x06\xa5\x39\x8a\xf2\x05\xfe\x07\x2f\x2b\xf2\xe8\x17\xb1\xb1\x99\x81\x2c\xf3\x25\x63\x6c\xac\xfb\x1d\xcf\x3c\xdc\x68\x94\xc9\x58\xc1\x3a\x41\x25\x5b\x46\x07\x83\x17\x6b\xa5\xa1\x6c\x6f\xd0\x13\xe4\x6f\x58\xfa\xf3\xfa\xd2\x0f\x36\x95\x7e\xa0\xe6\x1d\xfc\xfc\x9e\xeb\x2b\x77\x1a\xcd\xd6\xeb\x2a\xff\x2e\x57\xe1\xf9\x77\x26\xb3\x96\x77\xcc\x52\xdb\x9f\x92\x6e\x97\x7a\x9d\x43\xd5\x6c\xca\xc3\xce\x7c\xae\xcc\xea\xd9\x39\x94\x7d\xd5\x92\x5e\xaa\x6d\x62\x65\x5c\x73\xb9\x43\x51\x04\xbd\x67\xf8\x26\x10\xe1\x18\xbe\xd8\xe7\xe1\x38\x8a\x14\x3c\xb1\x3f\x54\x94\xc8\x01\xfc\xb0\x3f\xc6\xd1\xa8\xb7\xae\x39\xcd\xe6\xa6\xc6\xce\xe7\x9b\xbe\x6e\x31\x66\x94\x2b\x43\xcf\x27\xb6\x69\xac\x7a\x7f\xeb\xe3\xaf\xe9\xd2\x76\x55\xe0\x4c\xb9\xc1\x15\x57\x4f\xa3\x81\x38\xd2\x24\xa1\x3d\x7e\xb8\xbf\xbf\xf3\xf8\x60\x3e\xdf\x3f\xd8\xed\x3e\x3e\xe4\x7d\x52\xd6\xb8\xc1\xa8\xe0\x5e\xf9\x55\x4b\x9d\x27\xad\x2e\x7e\x61\x3b\x74\x91\xab\x50\xdf\xa3\x50\x12\xc7\xa1\x1b\x0d\x9b\xf3\x0b\xa8\xe8\xc6\xd6\x06\xc9\x49\x33\x2a\x4e\xb0\x44\x61\xd2\x6a\x41\x50\xa5\x32\x98\xcf\x09\x6f\xd9\x02\x86\x42\x90\x84\x53\x23\x0a\x70\x66\xf3\x9c\x2c\x5d\x22\xab\xf7\xb7\xec\xa2\x8c\x26\x6d\x69\xd2\xbf\x4c\x93\xce\x69\xb2\xd6\x90\x59\xcb\x16\x85\x36\x8e\xa3\xff\x96\xfd\x0a\x2d\xc6\xa4\x4a\xe9\x48\x18\x1a\x55\xbc\x4a\x87\xa0\x3d\x5b\x47\xc2\xf8\x3f\xd9\xfe\xc1\xee\x4e\xa7\xd9\xdc\x7f\xb8\xbb\xb7\xfb\x4f\xc6\xfb\xfa\xbc\xdd\x16\x17\xad\xc4\x4b\xaa\x14\xc0\xef\xeb\x78\x4f\xb9\xf1\x74\x1c\x1a\xa9\xb5\xa0\xf0\xc7\x7a\x28\xec\x53\x04\x92\x92\xfd\xa2\xb5\xf4\x0b\x5c\x6b\x17\xe2\xfb\xd4\x80\x32\x6b\x71\x52\xe5\x05\x4d\x51\x31\x27\x82\x25\xf5\xe6\x26\x7d\x75\xde\x6e\xeb\x8b\x96\xf0\x0a\xfd\xb8\xb3\xc8\x14\xe5\xb4\xdf\x
95\x64\xff\x32\x32\xb6\x7e\x9d\x8e\xee\x22\x27\x88\x82\x96\xeb\xf4\xf3\xad\xff\x50\x6e\x28\x07\xe2\xf6\xe3\xd0\xf6\xae\x58\x07\x6a\x04\x61\x0d\x36\x59\x0b\x9b\xe9\x39\x87\x79\x0b\x8d\xf6\x3d\xe6\xb1\x7e\x9d\x23\x60\xf9\xb7\x76\x06\xbe\xa0\xc0\x97\x71\xda\xae\xca\x55\xa7\x70\x48\xf4\x61\x37\xd3\xf6\xce\x4a\x8a\x5d\x07\x50\x0d\x69\x77\x0f\x89\x28\xd3\x8a\xd6\x59\xa6\x7d\x08\x0a\xa2\xc5\x74\x55\xe7\xc0\x91\x0a\xd6\x35\xc7\xa8\x7a\x1d\x18\x64\xba\xde\x3a\x51\x69\xc0\xba\x20\x0a\xb0\xc2\xd3\xba\x04\xb7\x0b\x43\x4f\xc2\x28\x53\x1d\x87\x9b\xaa\xde\x43\xa5\xfb\xca\x93\x8b\x94\x50\x7f\x6d\xbf\xe3\xcf\x73\x75\x61\x46\x27\xde\x30\x4f\x52\x38\xd0\x08\x39\x96\xab\x95\xbb\x3a\x30\x88\x8b\xf5\xfa\xd9\x32\x30\x24\x17\x9b\xb4\xaa\x55\x05\x80\x63\x91\x64\x65\x91\x92\x2f\xb3\x56\x08\x02\x2c\xc6\xd7\x14\x2b\x39\x2d\x97\x0a\x42\x84\x45\xa3\xb5\x45\x4b\xae\xc9\x15\x85\x21\xbc\x58\xb5\xfa\x68\x75\x97\x15\xba\x31\xaf\xde\x9c\x7c\xfc\xe0\x4e\xb9\x8a\x05\x6a\x66\x01\xd7\xc1\x55\xc9\x99\x3d\xd1\x64\x46\xae\x34\x38\xa7\x57\x61\xdc\x08\xe3\x86\x8c\x74\xe3\x86\x8f\xc3\x41\xc3\x94\xdc\x6a\x38\x2d\xe9\x4e\x44\x1c\xf3\x91\x00\x83\xc0\x28\x47\x03\xc3\x05\x37\xb2\xc4\x66\x37\x69\xed\xf1\x2c\x44\xfc\xee\x36\xbd\x0f\x78\x2c\x1a\xbb\x5e\xea\x20\xf0\xa3\x68\x2c\x78\xc9\x3f\xa0\xfa\x33\xa3\x2c\x7a\x57\x92\x38\xbc\xf1\xe4\xe3\xc7\x77\x8e\xd1\xfa\xb0\xd4\x4e\x56\x4a\x26\x13\x5f\xa8\xc2\x4a\x57\x7d\x04\x97\x8d\xd7\x1f\x4e\x0d\xb8\x47\xd4\x21\x6b\xef\x74\xf7\x1e\xee\x3d\xda\x3d\xd8\x7b\x38\x9f\x17\xcf\x87\x4c\xcd\xe7\xa4\x33\x57\xd4\x68\x22\xb4\xd9\x24\x5b\x61\xfc\x22\x94\xa1\x36\x5d\x31\x9f\xab\x7f\xef\xd2\x3a\x3a\x24\xc9\xd2\xb0\x57\xa3\x61\x0d\xe1\x2f\xde\x7d\x3c\x3a\x2d\x28\x3f\xc8\x4a\xd5\xed\xc6\xac\x94\x6a\x84\x32\xd6\x5c\x06\xe6\xe5\x09\x02\xe1\x97\x96\xe3\x64\x28\x4f\x4e\x8f\x5f\x7f\x78\x59\xe0\x7c\xec\x65\xb2\x2d\x75\xba\x98\x02\xd2\x0d\x2c\xbc\x79\x59\xc0\xee\x67\xb0\xa5\x96\x3c\xcc\xde\xa1\x96\xe4\x86\xb1\xd5\x96\x14\xed\x6f\x4b\xeb\xba\x80\xe3\xac\xee\x77\xaf\x4f\x4a\xad\x79\xf4\xd7\x25\x27\x32\x
2d\x2a\x1b\x47\xc7\xc7\x47\x7f\x14\x85\xbb\x1d\x2f\x93\x9f\x83\x95\x0e\x25\x55\xb8\x91\xe6\xf3\xad\xcc\x44\xcf\xc4\x6b\x8a\xf4\xe3\x93\x37\xcf\x9f\x9e\x36\x66\xa1\xbe\x6a\xf0\xc6\x30\x14\xe3\x41\x43\xf2\x89\x18\x34\xfe\xa7\xd3\xd2\x2d\xe7\x7f\x62\x85\x56\x0a\xdf\xa4\x44\x9d\xeb\xc2\xc5\x19\x0a\x22\x68\x5f\x78\xc8\xe8\x53\x6d\x66\x10\x5a\x3d\x96\xc4\xae\x67\xc8\x13\xb8\xc2\xd5\xdb\x58\x23\xa4\x68\x5d\x38\x24\x2a\x5f\x65\x74\x05\xac\xf1\xee\xe3\x87\x97\xcf\x8f\x1b\x1c\x71\x35\x3e\x08\x31\x68\xe0\x62\xd0\x40\x62\x1b\x7e\xa2\x1b\x91\x1c\xdf\x35\x62\x21\x1a\x4e\x2b\x43\xd3\x72\x1a\x42\x6a\x15\x8a\x18\x2b\xf8\x85\x96\x8c\xea\x2d\xd9\xf1\xfe\xb2\x8b\xff\xa2\x81\xb6\xa7\xf3\xee\x4c\x80\x33\xbb\xc4\x25\x76\x60\xb0\xd9\x57\x3c\xfe\x38\x93\x9f\x54\x34\x15\x4a\xdf\x91\x84\xd2\xfb\x12\xb5\xc9\x85\x55\x16\x90\x54\x5a\x16\x31\x53\x0d\x49\x4a\x2f\x67\xaf\xc9\x25\xb1\xbf\xa0\x50\x5e\x67\x9a\xbc\xd6\xa4\x68\xd0\xae\x57\x18\xbf\xd2\x1d\x42\xc4\xa4\x3b\x82\x90\x75\x7a\xe1\x61\x94\xaf\xc8\xad\x56\x4a\x40\x74\x1e\x5e\xa4\x83\x53\xad\x5e\xf4\x02\x16\x10\x53\x59\xa9\xa6\x20\xab\x65\xcf\x2b\xc8\xaf\xf5\x34\xbe\xbe\xc2\x92\x46\x34\x88\xb4\xc4\x7e\x4e\xd7\x90\x9d\x81\x6f\xa8\xea\xf9\xae\xdf\xf3\x99\xef\xfa\x29\x31\xbe\xf5\xe9\x84\x43\x52\x23\x65\xc8\x5e\x1b\x84\x30\xcc\x89\x99\x68\x72\x87\x2d\x1f\xd2\xbc\xe9\x5e\x4d\x38\x5b\x17\x51\xfa\xb5\x53\x9a\xe1\xd2\xb4\xaa\xf0\xdb\x6c\x6f\x76\xcd\xaf\xdc\x1c\xb8\xb7\x3d\x6c\x05\x38\xee\x0d\xa4\x3d\x18\x54\x07\x70\xa4\xcd\xb2\x83\x03\x88\xfb\x09\x81\xcb\x4b\xfd\x69\xac\x84\x92\x4f\x7b\x52\xde\x42\x9d\x91\x48\x40\x4e\xc6\x8a\x0d\x55\xbb\xf1\x54\x94\xbe\xaa\x6a\x16\xf9\x12\xf5\xfc\x76\x2a\x02\x1d\xca\x91\x59\x94\x70\x31\x2a\xfc\xf2\x32\xf7\xd8\x2d\x7b\xb2\xa5\xbb\x6d\x56\x80\xdc\x85\xbb\xd5\xed\x2d\xad\x53\x1d\xaf\xda\xf5\xd2\xe5\x06\x8f\xcb\x7b\xe9\x32\x96\xae\x4b\xe9\xd2\x90\xca\xfa\x8a\xc8\xdd\xea\xd4\xc5\xb5\x1b\x20\x8e\x20\x13\xc3\xa9\x4c\xcd\x26\x6c\x0a\x36\x95\xb9\xff\x38\x17\x9c\x39\x86\x01\x62\x18\x34\x9b\xcb\x50\x25\x5a\x05\x42\x89\x55\x50\xbb\x
05\xd4\x10\xa1\x86\xcd\xe6\xc8\x40\x8d\x40\xb9\xa3\x62\x1a\xe4\x50\x57\x08\x75\xb5\x0a\x57\xbe\xb8\x94\x10\x94\xd8\x6f\xb4\x5e\x6b\xde\x2a\x54\xee\x62\x10\x4a\xea\x73\x4f\x1c\x6a\xdc\xc8\x34\xcc\x67\x2a\xc6\x4d\xb5\x73\x71\xb1\xce\xfb\x3f\x5b\xab\x88\xa2\xc2\x63\x57\xdf\x70\x78\x47\x14\x18\x01\x08\x92\xb6\x1c\xa7\xac\x18\xdf\x95\x39\x50\x22\xce\xdb\x4d\xe6\x9d\xdd\xc8\xd4\xa9\x1e\x7c\x2d\x99\x41\x5b\xa0\x3b\x2a\xa1\xbb\xdf\xf6\x3a\xc0\x8d\xd2\x9c\x7f\x3e\xa9\x7e\xee\xd6\x3e\x9f\x56\x3f\xef\x80\xef\x49\x08\x3c\x53\x85\xd5\xd2\x9f\x6f\xd0\xd2\x77\x11\x7a\x80\x8a\x3f\x7c\xdb\x00\xb8\x57\x02\xc4\x56\x7c\x97\x65\x9f\xfc\x53\x24\x42\xa2\xeb\x19\x84\xf7\x5d\xb6\x5a\xa9\xa9\x80\x3d\x78\xe5\x9d\x5f\x2c\x32\x09\x79\x66\x60\x41\x16\x2d\xb8\x2c\xcf\xf8\x53\x49\xca\xd3\x5c\x92\x23\x49\x9e\x1a\x00\x4a\xcb\xf3\xfc\x4d\x4a\xa0\x74\xaf\xac\x81\xa4\x28\x20\x62\x6c\xf2\xbb\x4a\x4b\xca\x63\x51\x43\xff\xc6\x7e\x05\x5b\x0d\x56\x61\x9b\xf7\x41\xb2\xad\x2e\xbc\x92\xc6\x32\xcb\x2b\xc5\x0a\x8c\x9c\x78\x25\x53\x4f\x36\x85\xad\x0f\xa9\xbf\xdb\x94\xe8\xf4\x24\x7b\x25\xdd\xf8\x2a\x1c\x6a\x42\x7b\x74\xab\x1c\x15\x82\x3b\x3a\xca\x1d\xa6\x16\xb3\x34\xf3\xc9\xdd\x36\x2c\xde\xb1\xfb\x62\x5d\xf3\x4f\x0e\x37\x32\xb6\xe7\x08\xc5\x8f\xec\x51\xe5\x8e\x98\xf9\x19\xa2\x88\x35\x93\xc7\x36\xc8\x20\xc4\x0f\x3e\x31\xe8\xcc\x72\x93\x41\x2e\xc4\x38\x16\x86\x5a\x0c\x1e\xc9\xf7\x19\xdc\xa1\x1b\x60\xd5\x7e\xa5\x27\x0c\x1e\x69\xba\x10\x5d\x10\xe1\x90\xec\x5b\x6a\x52\xf2\x94\x7b\x55\x9d\x83\x69\xc5\x43\xac\xf8\x2a\x6b\x32\xc5\x4a\x1b\x86\x06\xc3\x5d\xe8\x84\xec\x78\x5d\x63\x0a\x1a\x50\x08\x3d\xe5\x8e\x16\x90\x95\x1d\x2c\x16\x0b\x22\x69\x0f\x7b\x7b\xb1\xd8\x60\xcd\xbd\x36\x03\x25\x40\xba\xc1\x33\xf3\xe7\xb1\xf9\xb3\x57\x2c\x08\xcb\x31\x37\xf4\x7e\xb1\xa8\xec\xe0\xbd\xae\x19\x72\x76\xed\x9a\x91\x81\x04\x09\xaa\xaf\xdc\xe1\x98\x8f\x62\xef\x26\x0a\x07\x8d\x0e\xed\xe1\x2a\x36\x9f\x4f\x49\xea\x16\x8d\xd8\xfd\x02\x42\x46\x02\xa6\x09\x2e\x65\x66\x25\x66\x9c\xf8\x10\x9a\x45\x71\x85\xed\x0f\x02\xa5\x94\x30\x1a\xd0\x47\x99\x79\x
a7\x3e\x1a\xf1\xd4\x4b\x5c\xde\x6c\x12\xa2\x99\x9e\xcf\xef\x17\xf4\x5c\x5c\xb0\xc4\xe5\x04\xcd\x24\x30\x10\x2b\x10\x0a\x76\x3f\x42\x8b\xda\x92\xb8\x80\x84\x49\x37\x00\x6e\x74\x64\x30\x7a\x8e\x40\x3d\x67\x98\x6f\x4f\xb9\x57\xec\xa9\x24\x33\xf2\x5c\xe6\x1d\xd5\x28\x87\x2c\xe1\x17\x09\xf7\xdb\xde\x3e\xf8\x5e\x99\x19\xec\xde\x8d\x74\x79\xc5\x9d\xec\x6e\xf7\xef\x08\x07\x81\xc2\xcd\x0b\x9a\xcd\xa8\x7f\x8b\x31\x7d\xca\x0d\x41\xb9\xdf\xcd\xdb\x3b\x7c\x11\xf4\x95\x6b\x86\xda\xbc\x32\xc3\x00\xd2\xf5\x29\x5d\x90\xb2\x7f\x4d\x2f\x48\x04\x7e\x69\x80\x7c\xdb\x54\x33\x26\x02\xa4\xe9\xd6\x21\x09\x8d\xae\x00\x8a\xc2\x4b\x49\x22\x08\x5c\x1f\x12\x12\xd2\x1c\x47\xf5\x2d\xf0\xfe\xfd\x34\x52\x3a\xf6\xf8\xc2\xbb\x4f\x77\xb7\x24\xbb\x5f\xe0\x00\x1e\xff\xaa\x4c\x50\xee\x88\xd4\x45\xc2\x9a\xf5\x62\x46\xde\x49\x90\xee\x15\xa4\x62\x5b\x55\x59\xee\xeb\xe6\x68\x30\x14\xe6\xd7\x9e\x82\xb1\x11\xe8\x85\x6c\xfb\x59\x97\xe8\x13\xf3\xdd\xb4\xe1\xd9\x46\x51\x2e\x3d\x09\x51\xcd\x87\xf3\x32\x5f\x8c\x90\x85\x20\xc9\x76\x3a\x91\x2d\x5f\x48\xb2\xd5\x01\x05\x09\x2e\x74\x14\xcc\xef\x2e\xe8\xfc\xb7\xa4\x6f\xd2\xf5\xf4\x7e\xdb\x73\x86\xb7\x0e\x70\x2f\x39\x17\x17\xf3\xf9\x7d\xe8\x9d\xc1\x77\xef\xac\x12\xb5\xf6\xa2\x34\x6f\x53\x2d\x49\xe5\x5a\x52\xd7\xb3\x13\x40\xb9\xd7\xc0\x19\xe1\x2c\x81\x88\x09\x98\x11\xd9\xff\x28\xcf\xf9\x85\x2b\x3c\xfb\xef\xb0\xa2\xe7\x15\x5b\x89\x51\x4f\xe1\x76\xd5\x0f\x6a\x44\xe7\xd4\xc8\x8d\x7c\x09\x36\x3a\x62\xb1\x1d\x65\x26\x08\xd1\xe7\xc9\x85\xa9\x86\x43\xc2\x48\x82\xce\x66\x5a\xa2\x1b\x64\x3f\x71\x43\xf6\x9a\x70\x48\xdc\x90\x7a\x89\xfb\x3d\xfd\xf1\x9d\x42\x42\x73\x67\x42\x61\x48\x28\x77\xd2\x0b\x5c\xdf\x98\x04\xae\x4f\xb1\xad\x86\x39\x4d\x6b\xd3\x8a\x7b\x15\xb7\x05\x92\x91\xf6\x89\x1b\x81\x86\xfb\xa9\xa7\x5c\x09\x3f\x3c\xb1\xb0\xcb\x14\x87\xa8\xe8\xbc\xcf\xd8\xdc\x8f\xf2\x5c\x5e\x34\x9b\x53\xb2\x5b\xea\xd7\xf7\x55\xae\x43\x48\x40\x48\x76\x2f\xbc\x2f\x12\x94\xa7\x80\x7b\x4f\xe4\x02\xbe\xe6\x6b\xe0\x97\xb5\x5a\x4e\x25\x6c\xe5\x49\x3e\xe1\x35\x04\xec\xfc\x02\x22\x86\x98\x5d\x65\xa4\x9d\x
66\x1d\xa8\x4d\x0f\x3b\x18\xb1\xd0\xa7\xe1\x44\x44\x49\x49\x66\x67\xab\x35\xa5\x0b\xd0\xf9\x60\x94\x3e\x07\x63\xc1\x55\x56\x4c\xa1\x3f\x28\x83\xb2\x75\xfa\x2c\xb4\xed\x72\x83\x35\x7e\xff\x9e\xca\x36\x2d\x69\xa1\x02\x06\x90\xb0\x88\x28\xb4\x09\xad\x79\x92\xe9\x8f\x1c\xc3\xa2\xf8\x05\x29\x02\xe5\xc2\x05\x85\xfb\x38\xf1\xe3\x40\x85\xbe\xa8\x88\xbd\x20\x5b\xd5\x17\x90\xc8\xd5\x20\x44\x9a\x25\x20\x48\x1d\xf6\x94\x96\x5c\xcb\xf4\xb0\x33\x9f\x07\xb8\x31\x80\xbe\xfc\x2e\x5d\xd8\x59\xfb\x43\xf6\xd6\x48\x9e\x55\x06\x0d\xee\x85\xd2\x4c\xcd\xfa\x24\x99\x93\xc8\x81\x18\x86\x52\x0c\x0a\xdb\x7c\x10\x05\xc9\x44\x48\xdd\xcf\x1e\xbc\xfb\xd2\x8e\xff\xdb\x5c\x39\xe2\xd3\xa9\x90\x83\xa7\x57\xe1\x78\x60\x3a\x7c\xd5\x02\x2b\x98\x70\x65\x34\x10\xc5\xb2\x31\xe5\x4a\x48\xfd\x21\x1a\x08\x57\x89\xe9\x98\x07\xc2\x22\xd8\x56\x44\x96\x97\xdc\x05\x05\x41\xe1\xbe\x22\x6f\x7e\x5f\xa9\xcb\x9a\x96\xfc\x51\xe1\xc7\xb2\x5b\xf4\x2f\x36\xc1\x3a\xa5\x31\xbf\xcf\x64\x08\xef\x89\x16\x4b\x5c\x7f\x3e\xef\x40\xba\x97\x95\x14\x5b\x6c\xad\x62\x97\x0a\x85\x6c\xe0\x05\x30\xf0\x06\x0a\xe3\x1f\x3d\x0d\x43\x8f\x83\xef\x09\xd4\x10\x48\xba\xe2\x83\x54\xff\x1d\x04\xfe\x1a\x89\x3b\xbf\x44\xa2\xdd\x99\x51\x9b\xb4\xf4\xef\x9e\xc4\x25\xc5\xf7\xba\x2d\xa2\xb0\x72\x5a\x19\x20\xad\x6a\x65\xf6\xcd\xf2\x03\x13\x4f\xc1\x75\xa6\x59\x2c\xd6\x08\x0e\xad\xc8\xb9\x04\x75\xb1\x42\xef\xb2\x7a\x63\xca\xb4\x42\x6d\x30\x86\x2c\x0e\xd0\xab\xb0\x64\x61\xcf\x0b\x74\xa7\x2b\xb6\x41\x21\xcc\xf1\x80\x58\x85\xa9\x08\x87\x46\x5c\x5c\x31\xb2\x71\x03\xa0\x84\x0e\x92\x55\x08\xcb\x61\xd2\x8b\x5f\xd8\x20\xa8\x20\x04\xbe\x0a\x65\x35\x72\x7a\xf1\x4b\x5b\x08\x35\xb4\x10\xac\x42\x5c\x8f\xa5\x5e\xfc\xe2\x36\xc3\x12\x72\x88\x56\xa1\x5f\x0e\xae\x5e\xfc\xf2\x66\xc4\x8a\x2a\x20\x5c\x55\xc9\xaa\x60\xeb\xc5\xfa\xc8\x0d\xa3\x9f\xf0\x8e\x53\xd2\x81\x28\x04\x1b\x66\x8a\xc3\xbb\x65\x60\x88\x36\xc2\xee\x54\x60\xc3\x8d\xb0\xbb\x65\xd8\xde\xba\x79\x80\xa0\x7b\x06\x54\x41\xe4\xdd\x0f\xb1\x84\x5e\x54\xa6\xea\x50\x15\xb2\xd4\x31\x6b\xd1\x54\x3b\xc6\xa0\x72\xa6\x8e\x27\xd7\x
cc\x51\xd3\x09\x68\xae\x6d\xf7\x67\x84\x2b\x30\x0a\x07\xd1\x4c\x82\x64\x63\x81\xfb\x8b\x11\x35\x02\x47\xb8\xdb\x46\x34\xf7\xef\x88\x2f\x40\x1e\xee\xf6\x63\xe5\x8d\x15\xc4\xc2\xa8\xbe\xc2\xe5\xd4\x9b\x91\xa1\x48\xfd\xc4\x0b\x4a\xbd\x34\x3e\x0d\x44\xb6\x9b\xa7\x20\x5e\xd7\x0f\x8d\x4b\x22\xed\x72\x6c\x44\xa2\x19\xb3\xb5\x3d\x16\xbc\xf3\x0c\x68\xf0\x8e\x02\x7f\xe2\x29\x97\x3f\x01\x7e\x63\xfe\xbd\xa9\x74\x05\xca\xc5\x92\x2e\x78\xbf\x28\x85\x95\xe5\x4e\x23\x0e\x82\x69\x77\x1b\x12\xa6\x5d\x89\x21\x00\x51\xcf\x0c\xde\x16\x63\xa2\x4f\x34\x53\xa8\xba\x12\xf3\x0f\x33\xab\x97\x19\x2c\xc6\x98\x68\x36\x9d\x60\xcc\xe3\xd8\xfc\x48\xfa\x37\x8a\x68\x7b\x5a\x01\x55\x48\x4e\x3d\xfb\xf5\x03\x9f\x88\x1c\x42\x59\x08\x85\x10\x8b\xe5\x30\xb8\x1b\x55\xd1\xbb\x99\x3c\x57\x17\x3d\xf3\x87\x89\xbe\x68\x39\x0d\xa7\xa5\xbd\xd2\x79\xb5\x6d\x55\x75\x7f\x6d\x67\x16\x78\xbe\x45\x60\x20\xdc\x6b\x8c\xf0\xbc\x66\xd2\x9d\x60\xfc\x31\xcd\xbc\x07\x39\xd8\x27\xe9\x06\x4a\x70\x2d\x4e\xc5\x2d\x2e\xe1\x36\xda\x2e\x1c\x92\x3d\x04\x2b\x79\x77\xa5\x7b\x8d\x26\xe4\xf7\x9e\xf9\x24\xdc\xed\x1e\x5d\xda\x03\x48\xfa\x09\x3b\x4f\x40\xb8\xdf\x2f\xbc\x6c\x27\xda\x28\xc8\x46\x69\xb8\xee\xd9\xb8\x8f\xfb\xef\x5e\x02\x53\x4f\x65\x0e\x1e\x12\xb0\x6d\x45\x04\x18\x1b\x59\x8c\x27\x97\xe2\x46\x48\x7d\x69\x54\x8c\x4b\x25\x86\x8c\x43\xb0\x08\x87\x64\xb7\x4c\xf5\x44\x11\x63\xc0\x5e\x11\xe9\x8e\x28\x28\x90\xee\x80\x42\xd0\xcb\x1d\xf8\xfd\xbc\x59\xcf\xc7\xc2\xa8\x3b\x1f\x4e\x88\x74\x87\x80\x1b\x59\xf5\x6f\xb8\xbd\xd5\xfb\x21\x9b\x4d\x87\x9b\xf9\xe2\x06\xcd\x66\xe0\xf2\xc1\xe0\xb9\x21\xe4\x5d\x18\x6b\x21\x85\x22\x4e\x30\x0e\x83\x6b\x07\x7e\x48\x12\x50\x0a\x86\x84\xb4\xe6\xdc\xb9\x18\xa1\x51\xbd\x62\xdb\xe0\xad\x24\x01\x6c\x2b\xd2\x35\x8d\xe8\x47\xe7\xe1\x85\x67\xfe\xe0\x46\x40\xae\x68\x06\x25\x9f\xb6\x5a\x72\xae\x1b\xf3\x4b\x97\x43\x56\x7a\x46\x24\x99\x81\xe8\xaf\xf4\x25\x30\xe9\xc6\xfa\x6e\x2c\x56\x46\xa4\x2e\x88\x84\x84\x7a\xe9\xe4\xaf\x62\x28\xdb\x7e\xd2\x0c\xc8\x8b\x18\xb9\x08\x9f\xcc\x34\x28\xcc\x41\x5d\x84\xf2\x88\x0b\x08\x98\x31\x
f5\x0c\xeb\x70\x74\x03\x05\xf6\xaf\xfb\xc3\xdd\x66\x8c\x71\x34\xec\xdc\x1f\x8c\xf7\x82\x48\xea\x50\x26\x62\x21\x5d\x25\x26\xd1\x8d\xa8\x76\xb4\x30\x2b\x50\x50\x38\x34\x42\x30\x53\xb9\x74\xee\x27\xb3\x29\x86\xee\x0f\xd0\xec\x06\x45\x07\xc8\x6c\x0b\x44\xd3\x52\xaf\x41\x62\xc4\x98\xa2\xa0\x98\x76\x39\x70\x96\xf4\x93\xc3\xdd\xbe\x72\xb9\x67\x84\x88\xa7\x40\xb3\xae\x99\xa2\xca\xf5\xbd\x5d\xc6\x92\x66\x13\x65\x4a\xc0\x88\x6e\x36\x4d\x17\x46\xd3\x4f\x2a\x9a\xf2\x11\xb7\xcb\x0d\x90\x9d\x25\xf0\x1b\x6a\x40\xa7\x0a\x19\xf7\x99\x18\xf2\x64\xac\x09\x85\x90\xf6\x04\x0b\xdc\xef\x3d\x1b\xdc\xbb\x1c\xb5\x2e\x28\x67\x82\x70\xda\x43\x1f\x58\xc1\x44\xb9\x35\x12\xb5\xdb\x3d\x03\x73\x1e\x5d\x18\x30\x63\x47\x4c\x17\x01\xe1\xe8\x25\xc9\xd6\x6e\xf7\x07\x93\x30\x5c\x10\x05\x9c\x82\x5c\xe6\x5b\x01\x01\xf8\xaa\xd9\xbc\x9f\xf2\x38\x0e\x6f\x84\x37\x36\x75\x1e\xee\x18\xed\xc1\x08\xb6\xc0\xba\xe0\xd6\x8f\x85\x05\xcb\x54\x3d\x64\x11\xe4\x9d\xdd\x55\xdc\x97\xab\xba\x96\xe3\x4a\xb1\x3e\x3d\xd1\x97\x6e\x2c\xf4\x91\xd6\x2a\xf4\x13\x2d\x88\x3d\x62\x96\xd6\x5b\x7a\x4d\x17\x39\x7f\xee\xfd\xbd\x3a\x20\x61\xc2\x1d\xa2\xb4\x89\x96\xea\xfb\x70\x42\x12\x58\x5d\xa7\xfd\x54\xd4\x7b\xc3\xc7\x89\xc8\x45\xfd\x95\x08\xae\xc5\x20\xfd\x89\xce\x36\xc6\x12\x33\x27\xd0\x0d\x47\x17\x0b\xad\xee\xee\x67\xa1\x1c\x44\xb3\x15\x62\x43\x3b\x76\x57\xe0\x23\x8a\x4a\xd7\x9a\x66\xf9\xa6\xe6\xfd\x02\x9c\x74\x60\x1c\xb8\x1f\x09\xed\x95\x54\x1b\x5f\xb1\xad\x8e\x51\x4d\x8a\x50\x8a\xd2\xce\x55\x65\x09\x38\xcf\xa3\xcc\x47\xa9\xec\x80\x0e\x2d\x9f\x70\x9e\xaa\xb2\x09\x77\xbf\xed\x29\x50\x9e\x86\xd8\x13\xa0\x53\x3d\x1e\x92\x4c\xa1\xcf\x1d\x25\x45\x30\x51\x69\xeb\x45\x55\xce\x7b\x60\xec\x65\x26\x98\xa4\x51\x13\x8c\x36\x61\x26\x63\xb2\xc5\x98\x15\x05\xdd\x2d\xec\xb1\x1d\x7c\x51\xf6\x76\x4c\xcd\xda\xd9\x01\x81\x1b\xa2\x6c\xb5\x1f\xc7\xc8\xd4\x5f\x3c\xcc\x64\x96\xd7\xfc\x48\x5a\x6a\xc9\x49\xd7\x18\x4a\xd2\x1d\x80\xf0\x04\x0c\x3d\xb3\x0e\xf8\x9e\x74\xfd\xc5\xc2\x08\x06\xce\xba\x8b\xd4\xf7\xc4\x53\xcf\xd3\x7e\x65\x37\x78\x0c\x91\xa9\x1c\x42\x16\x
e4\xfb\x8a\x2c\x64\x8c\xe5\x12\x7e\xd8\x6c\x86\x66\xa6\x0e\x59\x70\x1e\x1a\xe6\x30\xb2\xdd\x74\xc0\xb0\xdc\x56\xa2\x70\x21\xbe\xa6\x3d\xf3\xa0\xcc\x8a\x6c\x15\xa3\xda\xd8\xb9\xd7\xa0\xdc\x6b\xf0\xcd\xf8\x61\xb9\xce\xa1\x9f\x47\x97\x61\x7f\x75\x41\x80\x4f\xf3\xd0\x91\x8c\xd8\xd8\xac\xcd\x30\x66\xca\xfd\x0e\x03\xb6\xd5\x85\x1b\x53\x1d\x2e\xd6\x37\x66\xb1\x1e\xb0\xad\x0e\x2c\xad\xd8\x71\x3f\x66\xe7\x31\xdc\x98\x15\x3b\x4e\xc3\xb7\xcd\x8a\x7d\xc3\x6e\xdc\xeb\x7c\x65\xdb\x66\x2a\x45\xb5\xbd\x1e\xd5\xb8\x3f\x66\xe7\x63\xd8\x36\xa8\xc6\x16\xd5\xb6\x41\xb5\xcd\xb6\xdd\xeb\xac\x89\x83\x66\x33\x4e\x9b\xb3\xc5\xd8\x38\x7d\xec\xd7\xb9\xc1\x23\x64\xb0\x6e\xda\xb3\x4e\x4f\x1f\x16\x67\x0c\xec\x4e\x9e\x3c\xd7\x17\x86\x13\xcf\xf5\xc5\x8a\x6d\x3c\x12\xc3\x98\x7a\x31\x63\x6c\x4c\xe7\x73\xac\x67\x07\x04\x8c\x6d\x17\x9b\x7e\xbf\x81\x6d\xc3\xd2\xad\xee\xd2\xde\x37\x0e\x82\x74\x39\xee\x2b\xf2\x74\x0c\x76\xd1\x8d\xcd\x97\xb6\xd1\x11\xdd\x2c\x9f\x21\x70\x5b\x0f\x33\x5a\x82\xb8\xce\x20\x76\x3d\xdc\x3f\xbe\xc2\x7a\xae\xd6\x4e\x13\x9f\xdd\x19\x2e\x19\x80\x32\xaa\x87\x9f\xd2\xb3\x87\x3c\xd1\xf3\x99\x72\xc3\x62\xc3\xb5\xdc\x82\x0c\x72\xdf\x72\x4f\xd9\x81\x5c\xa2\x26\x49\x97\xf4\x5e\xbe\xa1\x6c\xd6\xb6\x74\x37\xb7\x4f\x08\x2f\xd7\x4e\x4b\x95\x73\x63\x8c\x66\x72\x81\x7a\x25\x8a\x4b\x67\x8e\x55\x4d\xc1\xc8\x2c\xb5\x22\xf4\x43\x52\xa3\xd6\x6c\xe1\xd2\x6a\xd4\x93\xec\x69\x37\x7f\xda\xc3\xa7\xbe\x0d\x14\xe9\x93\x88\xc9\xf3\xe4\x82\x32\xc6\x88\x0d\x73\xa6\xcd\x66\x2a\xbf\xd3\x12\x99\xfc\xb6\x32\x28\xd5\x79\x74\xb3\x49\x48\xc0\x22\x6a\x94\x13\x12\x31\x4e\xdd\x6d\xdc\x86\x0e\x5c\x0e\x51\x7a\xdc\x8a\x08\x26\xec\x7e\x8b\xd5\xeb\x2b\xbf\x75\x3f\x55\xc0\x74\xdf\x71\x32\x55\x4a\x9b\x0a\x76\xed\x5b\x2b\x4b\xd1\x56\x33\x62\x69\x08\x51\x26\x5e\xbd\xe5\x93\x3e\xe7\xc9\x85\x41\x63\x56\x0a\x2f\xed\xe4\xec\x68\x9a\xa9\x11\x12\xd3\xd9\x75\x82\xb0\xdb\xc2\x34\x62\x06\x7b\xaf\x42\x74\x68\x04\x62\x58\x3e\xdb\x59\x1c\xe2\x2e\xc9\xf0\x4c\x72\x0b\x94\xdc\x02\x24\x4b\x32\x41\xa7\x18\xcf\xa6\x99\x3a\x94\x7d\x1c\xd4\x03\x10\x
70\x7f\xe3\x29\x08\x3d\x3c\xdc\xe0\xc9\x43\x95\xf2\xc1\x43\xfb\x49\x82\xf0\xf8\xa2\x50\x8b\x03\x26\x0f\x55\x1f\x2d\x57\xd6\xe9\x45\x87\x41\x2f\xca\x82\x41\x42\x96\x9c\x47\x17\xbd\x91\x22\x21\xf0\xf3\xe8\x02\x34\xb4\x5a\x36\x76\x35\x44\x67\x54\x89\x4b\xaf\xd5\xea\x03\x39\xc0\xd9\xfd\x22\xf3\x45\x5b\x05\xdc\x34\x63\x98\x0b\x68\xf0\x59\x98\x3d\xc6\xac\x03\x63\xd6\x81\x01\x13\xbd\xf8\x70\xd8\x6c\x8e\x0f\xfd\x74\x83\xf5\x06\xb6\x19\xb9\x61\xd1\x79\x7c\x41\x5d\x0e\x13\x46\x9e\xb3\xf0\x7c\x8c\x3f\xae\xd8\x8d\xeb\xc3\x94\x3d\x77\x7d\x23\xd8\xb7\xb7\x18\x9b\xd8\x52\x23\x98\xc1\x1d\xdc\xc2\x35\x1c\xc1\x89\x29\xdc\xea\x5e\xc0\xa9\x29\xd8\xea\xe2\x22\x70\xd2\x6c\x92\x19\x3b\x71\x7d\xb8\x63\x13\xc3\xa6\x23\x76\x62\xf8\x0b\x4e\x9b\x4d\x72\xcd\x4e\x5d\x1f\x8e\x98\xd1\x90\xc9\x2d\x3b\xc5\x0f\x47\xcd\xe6\x1d\x1d\x29\x72\x05\xd7\x90\x40\xab\x35\xa0\x70\xa2\x30\xd9\xc4\x36\x4c\x61\x6c\x54\xb2\x41\x8b\x5d\x59\x4f\xe1\x69\xf6\x65\x66\x21\x07\x2d\x36\xb3\x5f\xe2\x16\xdb\x81\x71\x8b\xed\x58\xfd\x32\x1c\x92\x23\x3a\x68\xb5\x32\x5c\x93\x0c\x57\x5e\xd3\xa0\x8c\x37\x6e\xb1\x6e\xb5\xf4\x1d\xcd\xeb\xba\xca\xeb\x4a\xa1\x47\x8a\xcc\x60\x9a\x51\xbb\x4c\x43\xb7\x97\x6d\x2e\x6f\x9d\xcc\xe7\xa3\x2d\xc6\x6e\xa9\xaf\x04\xbf\xee\xd5\x71\xd6\xa9\xab\xd5\x71\xbd\xbe\x8e\x9d\x85\xd5\x64\xb1\x3d\x65\x5a\xf2\x16\xb5\x60\xdc\x6a\x2d\x70\x5b\x20\x3e\x1c\xf6\xb2\xf6\x94\x06\xdd\x8e\xf3\x72\x41\x7b\xbc\xb2\xe0\x95\xe7\xf0\x8d\x7d\x9b\xcf\xcf\x2f\x7a\x29\xbd\x25\x5e\x79\xee\xfa\x90\x2a\x54\xdf\x28\xd6\x48\x3a\x87\xd9\x94\x9a\xcf\x3b\x87\x41\xfe\xfc\x2d\x93\xa0\x8f\xcc\xcc\x99\x79\x09\xdc\x7a\x01\xdc\x79\xdf\xd2\x0d\x9f\x23\xc5\x9c\x4b\x31\x9e\xfc\x7e\xf0\xe4\x5d\x29\xa9\xcd\x89\x5a\xb5\x35\x8d\xe7\x0f\x4d\x0f\x07\xd9\xda\x91\x1e\x0a\xbb\x57\x5e\x02\x47\x5e\xc0\xee\x03\xaf\x03\x3f\x3d\x01\xe6\x45\x9c\x7b\x6f\x53\x3d\xc3\x94\x67\x01\x9a\x51\xc6\x4e\x0d\xdc\x80\xde\xd7\x30\x2c\x28\x04\x6e\xc0\x76\xd2\x1d\xee\x8a\xe2\x12\xb8\x3f\x41\x40\x04\x81\xab\x0c\x94\x62\x89\x45\x1b\xb8\xb1\x1b\xb3\xfb\x99\x17\x59\x0c\x8b\x8c\x
fa\xd6\x91\xca\x5c\x99\x45\x70\xca\xf2\x4a\x94\xb7\x8b\xe7\xc1\x05\xdc\x92\x96\x11\x12\x54\x09\x11\xc0\xdd\x9f\x10\x40\x92\x2e\xee\x53\x45\x14\x3c\x86\xc4\x74\x70\x00\x47\x46\x34\x2d\x4e\xab\x34\xd8\x30\x84\x7b\xc5\x72\xd8\xcc\x83\x8e\x9d\x72\x1f\x78\xdd\x52\xbf\xa9\xf2\xda\xf9\xbc\x24\x95\xb6\x4a\xdb\xe6\x35\x8f\xa9\x15\x76\x46\x49\xed\xd9\x70\xbe\xd0\x55\x96\xa9\xcc\xda\xca\x53\xb6\xf2\x21\x66\xa1\x55\x9e\xcd\x10\xc4\xfd\xe7\x86\x1e\xed\x5e\x43\xe8\xc6\x10\x51\xef\x11\xbe\x25\xa1\xab\x99\x82\xd0\x4d\x58\x04\x9d\x43\x62\xe4\x5b\xec\xce\x68\xae\x26\xda\xea\x7d\xe8\xa4\xd5\x53\xef\xf1\x72\x41\x62\xea\x8a\xcd\xb2\x12\xbb\x47\x6e\xcc\x94\x45\xb5\x19\x11\xf5\xca\x38\x28\x6c\x11\xd3\xaa\x56\x0b\x37\x4b\x09\x36\x8b\xfe\x33\x67\xc0\x04\xb9\x78\xcc\xb4\x6d\xd3\x1e\xea\x5f\xb9\xf0\x1e\x30\x9d\x2a\x95\x03\xab\x54\x0e\x0a\x35\xd1\x54\x3c\xb0\x1d\xd8\xea\x42\x00\x6a\x85\x6b\xc8\xce\x91\x1b\xa6\x5d\xd1\xcb\x94\xd4\xe0\x2a\x1c\x0f\x3e\x44\x03\x11\xe7\xcb\xcf\x84\x75\x7a\x93\xc3\x9b\x6c\x21\x9b\x64\x6b\xcf\x95\xb1\xfc\xd9\xb8\x7f\x73\x3e\xb9\xf0\xcc\x1f\x94\xf0\xad\x16\x6f\x11\x3b\xf1\x71\x2a\xf0\x43\x36\x6c\x36\x87\x87\x6c\xda\x6c\x92\x84\x49\xb2\x7d\x3e\xb9\x80\xab\x74\x6c\xa7\x90\xf7\x41\xad\x07\xf2\x2e\xe8\x71\x36\x5d\xe4\xfd\x91\xd9\x66\xd0\x01\xe5\xfa\x50\x4e\xac\xf2\x4d\x2d\x6d\x5a\xa0\xab\x2e\x53\x91\xa5\x47\x4a\xcc\x06\xdf\xcd\xb3\x2e\x07\x26\x7e\x57\xab\x14\x65\x55\x56\x94\xeb\xb6\xb2\x06\x51\x8b\x1c\x59\xda\x87\xcf\xf5\xe0\x95\x4e\x22\x26\x4b\xdb\x7f\xa0\xd9\x36\x32\x2c\xed\xe9\x15\xe3\x35\x9f\x93\x55\xaf\xad\x97\xa9\x3e\xb6\x3d\xd1\x6c\xea\x2d\xc6\x64\xb3\x59\xdb\x56\xd4\x20\x4b\x47\x98\x71\xb7\x3c\x06\xe5\x26\xb5\x58\xfa\xd4\x89\xe6\x26\xe6\x3b\x85\xfa\x66\xbb\xcc\x90\x3e\xe3\x9a\x93\x0e\xc8\x5c\xe7\x29\x41\xe7\x6a\xbd\xed\x5a\x37\xae\x2b\xf3\xab\x48\xef\xaf\x7a\xe9\x7e\x67\xca\x8d\xbd\x55\x9f\xd8\xfd\x77\xcf\x34\x61\xea\x29\x37\x59\x64\x55\x1f\x78\xe5\xf3\x53\x71\x9a\x86\x45\xbb\x21\x86\x2e\x66\x1e\x0c\xdb\x23\xb2\xc4\xf6\xe7\xda\xbd\x29\x54\x3e\x99\x85\x83\x16\x0a\x
13\x41\x7c\x14\xd5\xbd\x4a\x41\xc1\x4c\xd1\x9e\xc8\xd7\xaa\xb4\xa6\x50\xc6\x42\xe9\x27\x62\x18\x29\x41\xb6\x15\x49\x30\x5e\xd2\x4d\x28\xf0\x7a\x3d\x8f\x8d\x09\xb3\x95\xd6\x40\x0b\x27\x42\x79\x83\xb8\x44\xb6\xe9\x67\x2b\xc0\xb5\x7b\x54\x36\x56\x1a\x9d\x2d\xb3\x00\x29\xf4\xab\xad\x2d\x1c\xb8\x31\xb3\xb3\xc0\x9d\xe5\x43\xf6\x68\x15\xbb\x66\x5e\x12\xdb\x91\xd5\x2f\xe1\xb0\xec\x6e\xd0\x2c\x77\x0f\x3f\x4b\xb7\xca\x5f\x28\x3e\x42\x3f\x71\x9e\x0b\xa7\xdc\x3f\x99\xfa\x7c\x2e\x2e\xdc\xa3\xde\x5b\x69\x2c\x4b\xc6\x58\xe2\x06\xfd\xc4\x8d\x3d\xd3\x5f\xee\x4f\xec\xae\x52\x24\xd3\x82\x68\xf7\xce\x1e\xda\xcf\x1b\x50\x64\xae\x60\xda\xbd\x4d\x03\x13\x92\x72\x60\x42\xba\xce\x27\xe7\xdc\x68\xba\x81\x7b\x04\x11\xdb\x41\x47\x44\xd0\x8f\x6c\x5d\x51\x5a\x57\xaf\x36\x6c\x11\x54\x86\x3a\x70\xd5\x05\x5d\x88\x66\x13\x77\xfe\x45\x29\x30\xc6\xe6\x55\xa8\x1e\xef\x50\x6e\x4c\x24\xed\x0d\xac\x67\xd3\x9b\x92\x6e\x87\x2e\x16\x24\xc1\x74\x21\x0c\xa7\x28\x91\x4c\xe4\xed\x2b\x85\x6a\x3e\x55\x69\x04\xa4\x4d\x2e\x65\x58\xfe\xf4\x6e\x2a\x32\xd6\xf8\x5d\x12\xe9\x6a\x71\xab\x9f\x46\x52\x0b\x69\x8f\xff\x75\xb7\xd6\x80\x3a\x4e\xd1\x49\x59\xa2\x01\x9e\xb9\xf0\x62\xa8\x1f\xef\x2c\x9d\xee\x54\xec\x35\x99\x91\x50\x41\xe2\x4a\x3e\x11\x90\xb8\x68\x21\xe2\x8e\x48\x71\xe2\x5e\xba\x9a\x8f\x3e\xf0\x89\x70\x75\xf4\x2e\x9a\x09\xf5\x94\xc7\x82\x50\x08\xd8\x19\x5a\x16\x45\x07\x82\x28\xbc\x3f\x58\x57\xc0\x5e\x93\xa7\x8a\x44\xe7\xe2\x82\x42\x90\xf7\xe7\x1d\xf9\x03\x8f\xa9\x42\x50\x89\xa5\x50\x20\x41\x97\x36\x69\x31\xd4\x10\xf3\x78\x3c\x33\x7f\x1e\x9b\x3f\xa5\x60\x45\x3c\xcd\x9e\x45\xd8\x0f\x7c\x48\x58\x80\xdd\x03\x9c\x3d\x55\xa5\x48\x95\x77\x95\x78\x8b\xdc\x4d\x2e\x70\xa2\xb1\x2b\xa3\xa8\x2a\x33\x62\xdf\x14\x31\x6b\x97\x59\x3d\x8c\xc9\xb7\x28\x36\xe2\x2f\x2b\x5b\xe8\x96\x4c\x51\x25\x53\xac\x25\x53\x80\x2c\x85\x33\xdc\xa2\x6b\xfc\xd6\x1e\x01\x40\xb2\x03\x33\xb5\x74\xa8\xc7\x02\x22\xf3\xe8\x47\x83\x3b\x08\x4d\x13\xa2\xf5\x4d\xf8\x21\x59\x62\xc3\x18\x18\xc7\x33\xff\xec\x0f\x49\x1c\x53\xd4\xa1\xe4\x8c\x12\xe5\x06\x3c\x6d\x5d\x68\x
d6\x9d\xc8\xb4\x2e\x82\x10\x0c\x5b\x43\xc8\x34\xfc\x90\xac\x03\x01\x3a\x55\x82\x83\x66\x93\x64\x44\x30\x3c\x0e\x7d\x40\x6d\xf3\xe1\x8d\x5a\x19\x38\xa3\xc4\x8f\x44\xc4\xfa\x48\x86\x13\xdc\x01\x78\xa1\xf8\x44\xf4\x57\xbe\xad\xc4\xfd\x94\xe2\x9d\x24\x74\xc5\xee\x83\x83\x0e\x2d\x45\xdc\xbc\x53\xc4\x7a\x62\x89\x4e\x4f\xc0\x94\xa3\xa3\x39\xa1\xf7\x09\x6a\x27\x49\xbf\xe3\x91\x37\x8a\x70\x0a\xb8\xd3\xda\xcd\x27\x59\xed\x24\x1e\x93\xa0\xfa\x04\x61\x50\xfc\xa0\xb2\xd2\x35\xea\x5a\xc7\xfe\xb4\x48\x12\xb6\x53\x8e\xa2\xff\xa0\x8a\x0d\xf4\xe7\xc2\x74\xcd\x38\x0a\xb0\x45\xee\x95\x59\x84\x5d\x3e\x9f\x4f\x49\x97\x2e\xd6\xc6\x33\x46\x11\x7c\x13\x95\x90\x2f\x7a\xaf\x9a\xcd\xab\x30\xd6\x91\xba\x73\x47\x11\x51\x14\x24\xb1\x69\x1a\xb0\xa5\xaf\xd6\xee\x02\xaf\xc6\x96\xa1\x32\x86\xc8\x89\xe6\x5a\xa0\xcf\xdc\x81\x12\x5e\x38\x53\x6b\x53\x28\x6c\x46\x9a\xea\x00\xeb\xf0\xde\xd7\xdd\xf9\x65\xef\xfc\x02\x56\xec\x98\x78\xd5\x80\x62\x78\xbd\x9a\xad\xec\x6e\x41\xdf\xfe\xe3\x9d\xa9\xf2\xbe\x7f\x35\xd0\xe4\x52\x92\x5a\x40\x5d\x39\xb7\x29\xbd\x7f\x2a\x49\x82\x01\x60\x45\x82\xd3\xe5\x1d\x20\x55\xdd\x01\xc2\xd3\xd2\x25\x42\xf5\x9a\xbd\x1f\x1b\x72\xb7\x2a\x86\x22\x3d\xc3\x53\x3e\xc5\xa4\x68\xff\x5a\xe3\x2e\xbe\x77\xa4\xcb\x7b\xf1\x1f\x53\x5e\x5f\x11\x3d\xab\xe8\xfd\x1b\x55\x1e\x17\x1b\xe8\x9e\x05\xa9\xb9\x23\xa1\xd3\x4d\xda\x27\x77\xaf\x07\x66\xae\x28\x22\xfb\x47\x92\x18\x99\x46\xbd\x13\x49\x06\xb8\xe1\x67\x27\x31\x46\xed\xaa\x6a\xd4\x6e\x11\x28\xf7\xb1\x2e\x5a\xb2\xd5\xea\x5c\x5d\x10\x0a\x4f\x37\xc5\xed\x6a\xb6\x1c\x70\xf2\x5a\xb9\x71\xa0\xa2\xf1\x18\x21\xe1\xe9\xa2\x1e\xf8\x58\x6d\x19\x86\x3a\x6a\x42\x4b\x67\x06\xf4\x86\x78\x8f\xf5\xe4\xa6\xb5\xbe\x13\x43\x63\x86\x65\x3f\x4f\xa3\x29\xd3\x69\x23\x0c\xee\xaf\x8a\xfd\x55\x82\x99\x5c\xe3\x0d\x98\x6a\xf1\xc3\xc2\xeb\x17\xb1\x4e\x2f\x68\x36\xa3\x43\x6e\x17\xd1\xd0\x68\x33\xc5\x21\x7d\x63\xde\x33\x79\x1e\xb5\x5a\xb8\x11\x76\xae\x5a\xad\x8b\x66\x93\x74\x3b\x8c\x85\x7d\xa2\x5b\x2d\x10\xac\x4b\x3d\x22\x5a\x2d\xc0\x34\x0e\x8c\x91\x83\xdd\xbd\x47\x8f\x9a\x21\xed\x
d7\xca\x79\xdd\x62\xff\xfb\x0d\x09\xfa\xca\x6b\x77\xd3\x28\x2c\xf8\xb9\x21\x2a\x4c\x1d\xe6\x66\x51\xb5\x0a\x5d\xa5\x94\xf6\x25\xd1\x6e\x9c\xf8\xb1\x36\x86\xc9\x0e\xa5\x7d\xd5\xda\xf1\xda\x5d\x4f\x12\x7d\xae\x2e\x68\xdf\xf9\x53\xa2\xbb\xf6\x5c\x5d\xf4\xdb\x3b\x9e\x6a\x75\xcd\xd7\x76\x77\x41\xe1\xd9\xa6\xb0\xb4\x6a\x3d\x46\xbb\x59\x50\x78\xa9\x56\x66\x41\xe8\xc9\xc2\x0a\x93\x99\x22\xa7\xab\xa9\x0f\xec\xfe\xb5\x3e\xdc\x7b\x34\x9f\xef\x3f\x2c\x32\xa8\xc9\x42\xab\xa2\xf0\x42\x6d\x4c\x6f\xd1\xe9\x15\xfd\xd2\x53\x85\x72\x5a\x23\xb6\xbd\xf7\x08\xb7\xe7\x0e\x3b\xf3\xb9\x3c\x64\x49\xea\x89\x13\x4c\xfe\x26\x5a\xc9\x22\x8f\xc9\x51\x76\x1c\x3e\xab\x0d\xa9\x1d\x3a\x2b\xdb\x26\x56\xb5\x6d\xef\xd1\x3f\xc5\x7c\x2e\xfe\xb9\xff\x90\x86\x43\x72\xb0\x6f\x7f\x3d\xec\xa0\x7e\x28\x0e\x1f\x3f\x9c\xcf\xbb\x9d\x9d\x43\x91\x92\xa3\x59\xf7\xe0\x37\xdd\x12\xed\x47\x0f\xad\x5f\x2f\x7f\xb1\xbf\xdf\xab\xbe\xd8\x7b\x54\x10\x2d\x31\x1c\xb0\xf7\x57\xcc\x9f\x94\xf2\x26\x20\x43\xf3\xc3\x4e\x3f\x9b\x01\x1e\x6f\xc9\xc2\xef\x1d\xa4\xce\x99\xa8\x36\x0d\x5a\x2d\xda\x33\x4c\x1f\xf5\x89\x60\x5d\xd0\x36\x9d\xcb\x12\xd3\x47\xb4\xd9\x34\xb0\x8b\x9c\xcd\x79\xca\xe1\x36\x7d\x4e\xa5\x77\xcb\x11\x81\x35\x41\x69\xe3\x3a\x24\x93\x62\xd6\x38\x7b\xff\xee\x95\xd6\xd3\x63\xab\x86\x98\x91\x83\xd3\x21\xd1\x8c\x53\x63\x2d\x2f\xef\x41\x4f\x55\x34\x52\x22\x8e\x9d\x8a\x44\xc9\xda\xf8\x34\x9a\x4c\x13\xcd\xfd\xb1\x68\x36\x9f\x9a\xf9\xc2\xc9\x7d\x10\x78\x46\x19\xe0\x03\x31\x80\x60\xe0\x49\x57\x47\x9a\x8f\xed\x6a\xb0\x22\xc8\xc0\x11\x4a\x45\xca\xa9\xc4\xe5\x91\x13\x49\x8e\x86\x6b\x4b\x68\xab\x1e\x2d\x97\x39\x59\x5f\xc6\x10\x54\x2b\xb0\xca\xcc\x5b\x71\x74\x62\xc0\x71\xd7\x3f\x9e\x46\x32\x16\x5f\x8e\xdf\x81\x7f\xe2\xdd\x07\x57\x9e\x74\x63\xcd\x75\x12\x43\xf0\x2e\x7f\x3e\x15\xb7\x7a\x01\xc1\xcf\x15\x47\x5c\xb6\x23\x9b\xa0\xa4\xc8\xc2\x56\x4c\x05\x99\xe6\x78\x71\xfe\x54\x7f\x4a\x87\xc2\xea\x74\x33\xc0\x21\xb0\x46\x89\x31\xe1\x32\x26\x74\xbc\x86\x43\x7b\x9d\xc3\x08\xf5\xb6\x20\x95\x58\xa1\x1c\x91\x0e\x44\x46\x83\x2e\xbf\xda\x69\x45\x14\x
14\xbb\x23\x1f\x86\xe5\xe4\xdf\xc5\x1a\x71\xad\xc9\xa9\xb1\x6a\xfb\xbc\xe5\x00\x66\x55\xe0\x1e\xa7\x0b\x3c\xc6\x9a\x87\x9e\x11\x69\x96\xd7\xa3\xf1\xf8\x38\xed\x95\x57\x82\x0f\x84\x8a\x09\xa5\x10\x94\x7b\xcb\x1e\xb9\xc2\xbd\x49\xdb\x3f\x87\x3b\x9d\xce\x7c\xbe\xdb\xe9\x1c\xb2\xec\x15\xcd\xc5\xa2\x51\xcd\x59\x51\xd8\xf4\x25\x9c\x48\x72\x3b\x34\xeb\x74\x4f\x31\x45\x74\x4d\x6b\x38\xb2\xb1\x7f\x1e\x59\x5b\x78\x46\xee\x86\x36\xa1\x98\x59\x3e\x89\x84\xc4\x0d\x70\xf3\x75\x41\x7b\x5a\xdd\xdd\x4b\x37\x9a\x0a\x49\x12\x37\x78\x0f\x89\x3b\xe0\xb0\xd5\x59\xce\x3e\x81\xbc\x75\x3d\x24\x06\xc0\xa0\xd9\x5a\x9f\xad\x26\xf8\xd9\xd3\xae\xdf\xb3\x59\xe3\x30\x88\x24\x9d\x69\xb6\x8b\xcc\x0c\xb1\xd9\xcd\x5c\xdf\x58\xda\x39\xb9\x77\x53\x14\x7c\xca\xf5\x41\xba\xb3\x50\x5f\x3d\x55\x62\x20\xa4\x0e\xf9\x38\x36\x36\xd0\xc0\xcc\x52\xe5\x06\xfb\xd4\x58\xcc\x6e\x3a\x03\x4c\x91\x7d\x97\xdb\x86\x65\xe9\x05\x12\x37\xe0\x45\x14\x46\x2c\xe4\x80\x3c\x1f\x12\x41\xfb\x64\x05\x3d\x4e\x6a\x3e\xb7\x0d\x05\x8e\x3d\xec\x2e\x5c\x9f\x7a\xf8\x54\x56\x42\x5c\xee\x47\x4a\x13\xba\xa8\x6b\x3b\xd5\x50\xd3\x0e\xf8\x9e\x76\x7d\xe0\x95\x29\x20\x99\x11\x0d\x85\xf3\x6d\x46\xbe\x0d\xf3\xfc\xf4\x48\xf7\x17\xb5\x26\xad\x8d\x33\x72\x7a\xd2\x0d\x3e\x36\x9b\x44\xb7\x98\x33\x71\xcc\xfc\x0e\x44\xfa\x33\x74\xec\x38\x16\xec\x7b\x2c\x46\xcf\x6f\xa7\x48\xd5\xf2\x48\x1e\x69\xa3\x59\x3f\x59\xaf\xf3\xcb\x64\x3c\x46\x83\x6f\x92\x96\xdc\x9c\x52\x16\xd2\x5d\xc4\x8e\x99\x99\x36\xd4\x24\x4b\xe1\x03\x11\x6b\x77\x7b\xbc\xd5\x3a\x94\xcd\x26\x86\xc3\x8a\x5b\x11\x90\x80\xd2\x66\x33\xda\x2a\x43\xf6\x0a\x84\x61\x1e\xc1\xd5\xee\xc2\x30\x0d\x57\x09\xcd\xf4\x0e\x33\x3f\x3f\x13\xe7\xe1\x45\x6f\x78\xde\x6e\x87\x17\xcc\x37\x8a\xb3\x8f\x6a\x73\x92\xa5\xf1\xfb\xee\x83\x38\xef\x5c\x80\xb0\x22\x02\x38\x1c\xe3\xa9\x7d\x1b\x8c\x92\x55\x9a\xcf\xe6\xe2\x15\xd3\x90\x26\x18\xd4\xe5\x65\xc5\x66\xcb\x4a\x3d\x48\x9d\x3c\xdf\x5d\x66\x00\xd5\x74\xcd\x70\x48\x82\x56\xeb\x9f\x2c\xc9\xb5\x90\x92\xe3\x85\xab\x11\x6a\xe7\x59\x18\x47\x7b\x17\xb2\x74\xd1\xca\x34\x52\xe5\x29\x9e\x
72\xc8\x73\x75\xd1\xd3\xe7\xed\x36\x86\xb3\x5e\x6b\x22\xb0\xb1\x79\x9e\x7f\x6c\xae\x84\x02\x7c\xa9\x8a\x9d\x0b\x08\xe0\x38\x57\xf5\x29\xfc\xd8\xac\x09\xe9\x6c\x4c\xcb\x63\x59\x19\xd9\x9e\xcc\x65\x72\x90\x0d\xab\xa0\xe9\xfe\x99\xd5\x40\xf2\x00\x56\x7b\x78\x87\x43\x2a\xaf\x29\xad\x22\xce\xbd\xfb\x35\x78\x4a\x61\xe5\xc0\x7c\x5a\x6b\x01\xcb\xe6\xa6\x14\x75\x1b\x33\xe0\xfd\x8f\x0d\x25\xd5\xe1\xa1\x5c\xd0\xde\xd9\xda\x7a\xd5\x3f\xff\x29\x37\x64\x3a\xfe\x27\x7e\xee\x55\x12\x2c\x8a\x75\xc7\x21\xf3\xf3\x5e\xaf\xa5\x16\xea\x86\x8f\xcb\x56\xd0\x53\x49\xc4\xc6\xd3\x5e\x79\x21\x45\x6b\xe7\x71\xdf\x96\xa2\xdb\x1b\x33\xf2\x5d\x83\xf3\xa7\x6c\x34\x1a\x0d\x07\x66\xe4\x29\xfe\x72\x40\x96\xf7\x43\x7e\x2f\x97\xb8\x23\x97\x7a\x6d\x03\x5b\x46\xdd\xef\x18\x49\x96\x17\xfe\xa3\x5c\xf8\xf1\xc3\x43\x46\x24\x3b\xc3\xa3\x56\xcd\xa6\x3c\x64\xdd\x9d\x9d\x02\x56\xea\x02\x36\x07\x3b\x64\x8f\x3b\xcd\xe6\xc1\xfe\x21\x2b\xf9\x43\xd5\x6a\xc8\xfd\x87\xcd\xe6\xde\xa3\x0a\xa4\x2e\x41\x5a\x62\xe6\x73\xac\x67\x3e\x47\x24\xa5\xbb\x30\x74\x25\xbb\x40\x71\x01\x45\xe5\xbd\xcb\x4b\x37\x3c\xac\x29\x11\x94\xde\x3b\x0e\xda\x34\xa8\xf8\x6a\xf6\x11\x42\xcd\x26\x30\xd4\x8c\x54\x3c\x15\xa5\xc3\xc6\xca\x0d\x30\xbf\xf0\x00\x92\x75\x6c\x76\x47\x42\x0d\xd2\xdd\xee\x6b\x2f\xc1\xd8\x69\x6e\x46\xb8\xfc\x31\x01\xfc\xc7\x22\xb6\xe6\xc7\x9a\x73\x7d\x36\xaa\xb6\xbd\x83\x3b\x64\xdb\xd5\xac\xb8\x46\xf3\x32\x64\x48\xe0\xec\x0e\xdd\xeb\x3e\x68\x37\x80\x3b\x32\x44\xcc\xa0\x5d\x81\x39\x91\x13\x50\x8c\x83\x66\x02\xcf\x7f\xf8\xab\x34\xc7\x06\x16\x5a\x6b\x07\xce\x48\xa4\xc1\x66\x18\x46\xc3\x03\xce\x0c\x17\x41\xbc\x12\x95\x64\x12\x7e\x01\x9d\x2c\x63\x1a\x6b\xd6\x85\x81\x66\x3b\x70\xa3\x59\x07\xb6\xf5\x5a\xd9\xb1\xa0\x30\x59\xa9\xfb\xe6\x39\x20\xe0\x4a\x6f\x3a\x0f\x9c\xa7\xf9\xa5\x30\xdd\x00\xd8\x29\x03\x8e\x36\x00\x76\xcb\x80\xb3\x35\xa4\xa5\x27\xfe\xe0\x6e\xcd\xf7\x9d\xf4\xfb\xad\x66\x2f\xe1\xfa\x2f\x90\x1c\x69\x66\xea\x5d\xc0\x89\x66\x4a\xc2\xa9\x66\x1f\xe0\xb9\x66\x33\x09\xdf\x56\x0f\x48\xcb\x71\x16\xf0\x5d\xaf\xf7\x48\xfe\x01\x12\xbe\x
da\x3c\xb3\xf0\x74\x2d\xdc\x31\x99\x91\xdf\xc1\xe6\x89\xa1\x70\xa9\xd9\x46\xa6\xdd\xd2\xf5\x34\xce\x02\xf3\xe9\x5a\x86\xc5\x3c\xff\x59\x4a\xe7\x32\x83\xc2\x1b\xcd\x7e\xc2\xbb\xbf\x40\xde\xcd\x52\x67\xe7\x2a\x79\x5a\x83\x84\x84\xa9\x76\x17\x6b\x88\x34\x96\xeb\x49\x26\x40\xb1\x04\x34\xe3\xa6\x82\x0f\x7a\xfd\xcc\x7d\x67\x67\xce\x99\x69\xe0\xab\x0d\x70\x6f\x0c\xdc\x8c\x7c\xd0\xd0\x81\xdf\x15\x51\xb4\xdd\xa5\x36\xd7\xf7\x59\x75\x08\xb2\xe0\xde\x92\x49\xdd\x29\x65\xf4\xae\x24\x00\x55\x7d\xe5\x75\x3b\x3b\x7b\xbf\x11\xd5\xc6\x0f\xb4\x55\x29\xd8\xa5\x6d\x4c\x16\xd9\x3a\xd8\xdf\xdf\x3d\x58\xc0\xeb\x35\x33\xf9\x52\x43\xa4\xd3\x89\xf5\xf1\xef\x90\x83\xae\xcd\x3a\x4d\x97\x44\x1a\x15\x4d\xa6\xeb\x7d\x97\x52\xcf\xbe\x6a\xc9\xf3\x6e\xf1\x7e\x87\x52\x54\x77\xe0\x78\x5d\xb7\x39\x7f\xca\x3f\x25\x71\x5a\xdf\x34\x91\xad\x2e\x6d\x11\x87\x36\x9c\xd6\x5b\x45\xbe\xea\x34\x91\x09\x7c\x5d\xd9\xa2\x19\xf9\x69\xba\xfb\x8c\x2e\xe0\xe7\x32\x72\xe9\xa5\x7c\xb1\x22\x03\x51\xf9\x08\x94\x44\xde\x5b\xf2\xc2\x7e\xcc\xf2\x71\x76\x6d\x32\x87\x22\xac\x36\x4f\xf7\x00\xb2\x74\x05\x08\xf9\x43\x11\xb3\x00\x70\xbb\x52\x29\xb3\x52\xce\xc8\x89\x06\xad\x4d\x87\x13\xa3\x35\x19\x5d\x1b\xd9\x2f\xe9\x3b\xae\xd3\xd2\x9e\x73\xfe\x0f\xcc\x56\xf6\x8f\x0b\xc7\x72\x3c\x37\x0c\x99\x1f\x11\x69\xe4\x1b\xf1\x59\xb4\x81\x0f\x01\x73\xce\x6d\x5f\xb9\x9c\xb6\x9c\x0b\xa7\x8c\x37\xd8\x84\x65\xc7\xcb\x4e\xe9\x70\xd3\xae\xc8\xf5\x71\xca\x44\xae\x9f\x9d\x0a\x0b\x19\x51\xae\xdf\x77\x4e\xaf\x44\xe3\x4d\x1c\x49\xf7\x99\x08\xa2\x81\x70\x23\x29\x3e\x0e\x1b\x5c\x37\xbe\xc7\x91\x74\x5a\x56\xfd\x70\xe0\x35\x36\xd3\x73\x96\x40\x1d\xda\x72\x1a\x43\x1e\x8e\x31\x27\x5b\x43\x5f\x89\xc6\x30\x1a\x8f\xa3\x99\xcd\x28\xf5\x4d\x93\xdf\x15\x89\xa8\x81\x9a\xf1\xbb\xd8\x73\x7a\x35\xc5\xc6\x28\x33\xd8\xa0\x10\x66\xe4\x95\x86\x63\x8d\xd1\x37\x0b\xc9\x38\x13\x2c\xc2\xdb\x57\x12\xa6\x4a\x4d\x4c\xb5\x50\xe7\x98\xcb\x46\x28\x75\xd4\xe0\x2b\x5a\x80\xa9\xed\x64\xd4\x98\x46\x71\x1c\xfa\xe1\x38\xd4\xa1\x88\x9d\x96\x6d\xf4\xfa\xf6\x6d\x39\xc5\xf6\x6f\x80\x03\x1f\x61\x
b6\xf9\x74\xe0\x43\x86\xe5\x3f\xa9\xc8\x1f\x8b\x89\xad\xc4\x34\x19\x37\x59\xd7\x61\x6d\x39\x9e\x69\x26\x6a\x70\xde\x72\xd9\x51\x78\x23\xa4\xc5\x80\x70\x0e\x6d\x91\xb7\x8a\xcc\xc8\x73\x0d\x7b\x80\x5d\x97\xbe\x0e\x8c\xe8\x7a\xa6\x37\x1c\xfe\x2d\x2f\x5a\x10\x78\x1a\x06\x78\x50\x1a\x5e\xe2\x81\xec\x17\x9a\xbd\x87\xcf\x6b\x45\xda\x0f\xa2\xe8\x83\x1f\xd6\x88\x7c\xaf\xd9\x0b\x4d\x66\xe4\xb3\x86\x1d\xd8\xdd\xa1\x14\xbe\x68\x76\x4b\x9e\x19\x61\xf7\x5e\xc3\x4b\xf3\x1f\x85\x27\x9a\x85\xf0\x63\xfd\x42\x8d\x47\x9f\x21\x5c\xab\x8c\xdb\xba\x3e\x69\x76\x04\x6f\x35\xfb\x02\xbf\xaf\x5e\xbb\xb2\x6c\xb2\xf0\xc7\xda\xaa\xb2\x9b\x16\xfa\x98\xbd\x1b\xa4\x60\x2f\x40\x09\xb6\x0d\x5a\xac\x76\x54\xf7\x32\x83\x7d\x46\x94\x80\xdd\x1d\x90\xd6\x8f\xe5\x83\xb6\xf3\x2b\xed\x4a\xed\xf2\x45\x96\x01\x4f\x14\xab\xd9\x6b\x14\x19\x76\x55\x41\x23\x5d\xfc\x55\x3d\x2f\x34\x51\x0f\x76\x77\x72\x39\x93\xfb\xbd\x0b\x02\x5c\xde\x93\x6c\x46\xb4\x40\x49\x97\x61\x4e\x96\x31\x63\xd2\x40\x91\x61\xde\xdd\xf9\x4d\xb9\x02\x04\x7b\x9b\x8e\xd8\xee\x0e\xe8\x76\x97\x52\x90\x4c\xf6\x0d\x0f\xba\x23\xea\x29\x77\x04\x06\xbb\x30\xd8\x4d\xe9\x5e\x7e\x7e\xfb\x99\x86\xdf\x0d\xd4\x90\xb6\x34\xcc\xc8\x1f\x18\x3a\xff\xdb\x7b\x4d\x11\xb2\xc8\xaa\x57\x86\x4c\xb9\x00\x3f\x53\xe0\x82\x3d\x87\x40\x6c\xd8\x89\x49\x17\x6d\x75\xd8\x29\xda\x9d\x08\xd8\xea\xc2\xfd\xc8\x13\x20\x3c\xfd\x60\x77\x67\xde\x81\xa1\x97\xa4\x2e\x16\xce\x52\xcd\xea\x8e\x3c\xc1\x36\xa1\x03\xa6\x67\x54\x4b\xc5\x54\xdb\x34\x92\x69\x10\x76\xb4\x38\x08\x0a\x09\x4b\xf0\xc4\xf2\xca\x0e\xeb\x1c\xca\xfc\x14\xe9\xbf\xef\xee\x80\x60\x88\x57\x83\x6c\xeb\xd2\xfe\xde\x0d\x09\x84\xa9\xa9\xad\x4d\x0d\x12\xce\xa0\x08\xf7\xf8\xa2\x0d\x37\x8b\x15\x3c\xba\x25\xdd\xed\x05\x0c\x05\xf3\x25\xf8\x82\xc5\x12\x62\xb1\x59\x81\x1b\x57\xbf\xaf\x58\xb8\x32\xce\xae\x05\x68\x75\x6b\x81\x59\x3b\xb9\xbc\xca\x6e\x37\x5b\x2c\x60\x20\x18\xd7\x70\x23\xd8\xe6\xb3\xf8\xf7\xfc\xc0\xe3\xe0\x07\x9e\x02\x9e\x78\x02\xfc\xc4\xd3\xe0\xdf\x79\x12\x82\x53\x0f\xfb\x72\x5b\x30\x2d\x61\x22\x36\xcf\xcb\x2b\xc1\xa4\x84\xa9\x
58\x2b\x08\x0e\xbb\x7d\xe5\xdd\x91\x2b\xc3\x7b\x13\x7b\x4c\xd1\xb0\xcd\x48\x30\x2e\x61\xb6\xb1\x9c\xe3\xd8\x82\x1d\x48\x6f\x52\xb9\x13\x6b\x8e\x4f\x75\x30\x4e\xa6\xa2\xd6\x80\x60\x7b\xbb\x78\x58\x61\x6f\x9f\x31\xdd\xef\x7a\x1d\x48\x98\xe8\x25\x45\x68\x53\xab\x55\xc4\xdf\xd6\x12\xb4\x63\x50\x64\xba\x4d\x95\x9f\xe6\x3a\xd2\x3d\xc5\xba\x9d\xdf\x54\x8b\x97\xb6\x63\x12\xc6\x44\xff\x48\x7b\xd7\x9a\xd8\x9a\xda\xca\x53\x74\x01\xb7\xeb\xe6\x44\x38\x24\x81\xa9\x64\x3e\x9f\x91\x6d\x01\xce\xff\xe5\x40\x42\x4b\x75\x58\x8a\x66\x64\x24\xc0\xf1\xcc\x37\xa4\x26\x5d\xc7\x39\xae\xe3\x55\xe0\x80\x71\x3c\xf7\x79\x27\xc8\x8c\x4c\x05\x04\xad\x2e\x66\xeb\x49\xc5\x0d\x2f\x4c\xc7\x23\xdd\xe3\x2c\xf7\xb9\x5e\x6b\x32\x24\x37\x02\x75\xd9\x99\xb0\xd1\xc4\x3c\x3b\xdd\xb2\x58\x02\x4a\xe0\x48\xe7\x5f\x29\x5c\x8b\x95\xab\x91\x6d\x9d\xa8\x37\x28\x49\x1b\xf4\xc0\x01\xeb\x81\x4a\x4c\x83\x12\x96\x14\xf9\xb4\x6e\xc8\xad\xa5\x65\x6a\x38\x55\x50\xc4\x88\x94\x25\x65\x82\x52\x30\x83\x29\x4b\x66\x01\x47\x62\x85\xc1\x60\x29\xd1\x75\x4a\x44\x4a\x49\xdf\x01\xab\xfa\x09\x9b\x66\x54\x14\x94\xdc\x92\x6b\x53\xc5\xb5\xb6\xfd\x29\x5a\x5d\xd0\xd4\xd0\x83\xd4\x08\x74\xd8\x56\x41\x6d\xcf\x18\x52\x4e\x56\x8a\xa0\x40\x97\xf2\xc3\xa6\x84\xe8\x94\x90\x7f\xcb\x92\xe0\x1a\x93\xe9\x5e\x33\x5d\x10\x72\x47\x8e\xca\x84\xe8\x56\x17\x93\xa1\x21\x19\xba\xbc\xd5\x91\x02\x1e\xa5\x97\xd2\x9d\x0a\x26\x24\x3c\x17\xab\xf5\xe9\x53\x01\x78\xcd\x96\xf7\xe0\x81\x03\x92\xf6\x67\xe4\xc4\x4c\x32\xac\xe2\x21\x48\x4a\xbd\x02\x26\xae\x00\x75\x2d\xd0\x23\x04\x32\xea\xfe\xb7\xe5\x19\xd9\xeb\xd1\xce\x02\xbe\x0b\x76\x24\x41\x4a\xf6\x5d\x98\xc9\xf8\x74\x2d\xb3\x88\x22\x41\x8c\x40\x2e\x16\xf6\xf8\x09\xcf\xde\x5b\xee\x8e\x18\xb7\xef\x23\x3b\x5a\x11\xc2\x46\x25\x58\x0b\x51\x4a\x9a\x92\xc0\x0c\x43\xce\x66\x18\xd1\x85\x97\x29\xb8\x1c\xf6\x3b\x9d\x43\xdd\x47\x33\xc9\x90\xf1\x5a\x1b\x15\xd5\xbb\x25\x4f\x85\xf5\x9a\xb4\xba\xa8\x84\x16\x5d\xbb\x8c\x4a\xad\xf9\xaa\xea\xaf\xab\x17\x27\x5c\x8a\xf5\x5b\xed\x79\xfd\x1d\xcb\x45\x6f\x2a\x5c\x54\xbe\xea\xe5\x8e\x5c\x0a\x
58\x1f\x07\xa4\xc1\x5e\x02\x96\x3b\x57\x28\xbc\x13\xec\xb9\x84\x0f\x62\x5d\xd0\xc8\x8c\xbc\x13\xab\x76\xd8\xbe\x0b\xa2\x30\xea\xc6\xa2\x79\x55\x21\x5f\x63\xf0\xd9\x6a\x14\xea\x57\x50\xcf\xd2\x7b\x7f\xe9\xc2\x4c\x61\x5b\xc5\x99\x60\xc7\x12\x5e\xaf\x5d\x14\x2e\x25\x41\x8c\x67\xc2\xb6\x91\xda\x5b\x06\x36\x78\x9a\x3e\xac\xac\xbe\xb3\x00\xa2\xd8\x8c\xbc\x11\xf0\x3a\x45\x05\xd8\xaf\xaf\x04\x89\x34\x85\xef\x82\x9c\x51\x48\xad\x52\xb1\xa1\x02\x64\xee\x05\x85\x63\xb9\xde\xbb\xf2\x41\xa4\xcb\x57\xef\xa3\x74\x4f\x79\x7c\xcd\xee\x7d\x4f\x4a\x08\x3c\x2e\x8d\x92\x2e\x41\x78\xc7\x12\x86\xf9\xb9\xe4\x22\xa8\x48\x94\xf4\x07\xfe\xce\xdb\xea\x82\xff\xdd\x73\x1c\xf0\xaf\x2b\xd9\x4e\x8f\x2b\x70\xc2\x9b\x46\xe0\x7f\xab\x40\x7c\xad\x40\x9c\x7a\xc7\x82\x38\x0e\x05\xfe\x35\x7b\xba\xf1\xee\xf9\x77\xef\x48\x03\xbf\x36\x7f\x83\x5b\x4f\x02\x9f\xe2\x0b\x85\x7f\x7f\xe2\xdf\x23\x9c\xf4\x71\x5e\xfe\x2c\x7b\xf2\x07\xe6\xbb\xff\x39\xfb\xfd\xc6\xdb\xea\x96\xb3\xf7\x95\xea\x27\x33\x72\x1b\xa1\xf8\x46\xa9\x72\x17\x41\x17\x24\xf5\x24\x6d\x39\x0f\xf8\x34\x7c\x70\xb3\x53\xba\x1f\xf0\x99\xd8\x98\x06\xf6\x65\xf5\xf3\x7e\xed\xf3\x0b\xb1\xca\xcd\x56\x64\x97\xab\x7e\xde\xad\x7d\x7e\x2f\x36\xe6\x98\xfd\x52\xfd\xbc\x57\xfb\xfc\x64\x73\xdd\x3f\x36\xd7\xfd\x69\x33\xf2\xb7\x9b\xdb\xfd\xfb\x66\xca\xff\xd8\xdc\xa9\x32\xd9\x48\xb9\x4a\x36\x96\xd6\xc9\x46\xca\x45\xb2\x91\xf2\x24\xd9\xd8\x2d\xbc\x46\x5a\xbd\x65\xc1\xe6\xe2\x51\x52\xde\x2b\x47\x23\xc3\xb6\x0e\x56\xae\x93\x9f\x23\x63\xa4\xcc\xc8\xef\x51\xea\xf6\xb4\x4e\xa1\x02\x5f\x98\x6c\xbe\xd9\xf8\x8e\xbc\x47\x14\x76\xff\xb8\x48\xef\x91\x54\x12\x8e\x3e\x89\xea\xfb\x92\x23\x32\x41\xb1\xa4\x69\xe6\xb3\xbb\x13\xa5\x3d\x69\xe5\x6e\xf7\xa3\x84\xfc\xe3\xb5\xb4\x17\x4b\x84\x52\x8b\x91\x50\x5e\xc3\xf9\x47\x4b\xb6\xfe\xe1\xfc\x83\x7a\x41\x88\xf1\x06\xd9\xf2\x13\x25\x24\xcd\xf6\x2d\x06\xd6\x35\xa3\x69\x8b\x38\x8d\x41\x38\x0a\x75\x0c\x98\x46\x7f\x14\x69\xfb\x09\xeb\x36\xb2\x0f\x92\x90\xe8\x90\x28\xdc\x91\x2c\x52\x92\x26\x65\xe3\xa7\x74\x13\x70\x42\x64\x71\xd0\x96\xe3\xc9\x0b\x1f\x
02\xb6\x85\xca\x6e\x6f\x45\x2f\xd9\xa6\xdd\x90\x69\x08\x09\x48\x0c\x28\xf0\x5f\x82\x74\xf9\x09\x6e\x9b\xe0\x5e\x26\xc7\x1b\x9f\xec\xad\x8e\x41\x7e\x00\x8e\x68\x68\x77\x8d\xf4\xa8\x0c\x11\xa7\xd4\xc3\x2e\x0f\xa0\x03\xf7\xfc\x04\x1d\x21\x45\x6a\x04\xdf\x58\x38\x2f\x3d\x61\x46\xdc\xe5\x8b\xf2\x88\x8c\xcb\x8c\x31\x0a\xc9\x8c\x5c\x85\x36\x0b\xec\x23\xe4\x8f\x72\xfb\x07\x65\x58\xd3\xb1\xd9\x30\x0c\xf8\x9d\x67\x7b\xb0\xb2\x43\x77\x53\xee\x2f\x32\x23\xb3\x10\xf6\x40\xd2\xf9\x7c\x0b\x9f\xbb\x9d\x0e\x48\xf4\x22\x9a\x0f\xf8\xa3\x54\x78\xbb\x54\x98\x10\xd9\x66\x5d\xfa\x60\x6f\xde\xa1\x6d\x22\x1f\x74\x3b\x9d\x79\x87\xb6\x88\x7c\xb0\x87\x4f\xa5\xd4\x3a\x7f\xc9\x97\xd7\xa6\x79\x6f\x23\x8c\x7e\x2d\xb2\x6d\x24\x15\x98\xe7\x61\x65\x40\xfc\x9b\x72\x7e\xf8\xa4\x32\x53\x9e\x86\x70\x1a\x56\xb6\x16\x47\xc9\x2f\xee\x4b\xde\x91\x57\x21\x26\xe4\xb7\x57\x06\x18\xc3\x33\xaa\x60\x9a\x95\x31\xc9\xbe\xa3\x55\x22\x1c\xcf\x19\xf2\x71\x2c\x4a\x2b\xc5\x5d\x05\xcc\xdd\x9e\xcf\x1d\x67\x2b\xbd\x7f\xda\xac\x5a\xc5\xb1\xf0\x32\xe0\x67\x33\xd6\x97\x11\xbc\x0f\xe1\x54\x13\xd9\x72\x98\x53\x61\xf6\xeb\x64\xed\x0e\xa5\x73\xe9\xd8\x14\xd5\xb5\xad\xca\xa3\x0a\x21\xac\x60\xa5\xc7\x96\x95\xaa\x13\x83\xc9\x4a\xf8\xca\xbf\x60\x56\xcc\xe7\x9d\x43\x76\x47\x64\xb8\x4a\xa8\xe9\x0a\xed\x0b\xc0\xbb\x10\xfe\xcb\x13\xa9\x97\x47\x7d\x95\x32\xb4\x57\x39\xe4\x93\xe9\x04\xa7\x7c\x4f\xef\x69\xb2\x71\x91\x7a\xbe\x79\x99\xf9\x56\x9e\x18\xd2\xdd\xee\x9f\x26\xde\xf3\x84\xda\x1b\x1c\x8a\xd3\x82\x9b\xeb\x78\xba\xb9\x8e\xcb\x6a\x13\xd4\x10\x24\xc8\x61\x39\xe1\x7a\x15\x20\x1b\x68\x8b\xa7\x04\xf7\xae\x0c\x17\x27\xe4\x4d\x52\x15\x11\x1f\xca\xf3\xf5\x28\x24\xc7\xe4\x7c\x46\x06\x21\x04\x21\xf9\x9e\x90\x0e\xa5\xf0\x2e\x21\x27\x09\xee\x93\x00\x7e\xc9\x3e\x3f\xc5\x97\x70\x99\x90\xe5\xc1\x76\xfe\xfc\xd3\x8e\xf2\x3a\x80\xad\xce\x22\xc3\xf7\x0b\xa8\xec\x39\xc5\xfc\x1a\x60\x7a\x51\x6e\xc1\xab\xda\xec\x4b\x1d\x22\xd5\xd1\x38\xab\x02\x61\x48\x90\x4c\x63\xe9\x8b\x94\xe4\x35\xa0\xe2\xcb\xc7\xa4\xbc\x87\xe5\x7f\x2b\x42\xc7\x82\x57\x2d\x63\x20\x0f\xc1\x
71\x60\x46\x3e\x87\x90\x68\x38\x4b\x8c\x14\x1f\x02\x4e\xef\xd7\x09\x7c\x32\x75\x7d\xa4\x14\x7e\x84\x66\x71\xc3\x65\xb2\xa5\xcc\x3f\x25\x45\xba\x5a\xc3\x4f\xf4\x2a\x7d\xc1\xdd\xa3\x13\xcc\x14\x76\x8a\x93\xf7\x18\x33\x8b\xbf\xc2\xfd\x01\xce\x41\x2e\x6f\x2b\x4d\x86\x39\x3d\x01\x4d\xe3\x0a\xb7\x8b\x7b\xce\x64\x39\x8d\x38\x9a\x21\x48\xe5\xc7\xc4\xfe\x7b\xad\xe1\x4b\x48\x9c\x61\x38\xd6\x42\x19\x79\x94\x6e\x2d\x45\x6c\x46\x5e\x84\xab\x75\x95\xdb\x24\x13\xa2\x74\x01\xaf\x88\x84\x63\x72\x7e\x49\x9c\x38\x1c\x0b\x19\x88\x81\x83\x3e\x84\xc4\x52\xb5\x85\xee\x0c\x33\xd6\xc4\x09\xe5\x55\xe8\x87\x7a\x19\x42\x64\x10\x93\x64\xc5\xd7\x24\xfb\xca\x03\x8d\x69\x9d\xca\x9f\x3b\x46\x80\xd8\xcf\x4a\x04\x22\xbc\x11\xca\x81\xbb\x94\xbd\x88\x33\x52\x51\x32\x75\x20\x32\x1c\x94\xeb\x05\x9d\x43\xdc\x2c\xea\x3b\xfd\x6c\x0f\xa5\x69\x40\x3c\xa7\x24\xe2\xbf\x26\xa9\x7e\xd4\xb6\xc7\xe7\xb6\xe7\xf3\xf4\x69\x50\x7a\x16\x45\x4f\xcb\x5e\x01\x2c\x2c\x90\x7d\xe0\xc5\x28\x0f\x70\x90\x05\x64\xdb\xd3\x1a\xd3\xc7\x6b\x4c\x1f\xaf\x5d\x51\x38\xa7\x2e\x43\x63\xa8\xa0\x3c\x0e\x00\x7f\xda\xe3\xcd\x18\x13\x82\xc9\x62\x04\xcd\xde\x67\x49\x46\xf3\x94\xd6\x58\x51\x98\x56\x14\x62\x45\xa1\x1b\x80\x66\x84\xb3\xd0\x1d\x50\x53\x9e\x71\x57\x40\xc0\xc2\x5a\xa5\x1d\xe0\xae\x0f\x3c\xab\x74\x89\x86\xc8\xf5\x21\x72\x03\x88\xdc\x01\x44\x86\x06\x4d\x33\x48\x43\x86\x42\x32\x0a\x4b\xec\xbf\xd2\x87\x83\xac\x0f\x07\xcb\x7d\x68\xba\x40\x60\x03\x13\xbc\x32\x97\x33\xe1\x06\x10\x30\x61\xc8\x62\x62\xa9\x27\x43\x5c\x05\x87\x6c\x45\x6f\x6a\x5c\xdd\x8a\xde\xcc\x12\x0c\xd8\xde\x2c\x17\x14\x8c\xe0\x75\x58\x14\xab\x25\x66\x5e\x09\x8a\x95\x4b\xac\x5c\x62\xe5\x72\xa9\x47\xb3\x60\x99\x94\x16\xa2\xcc\x80\xd3\x95\x83\xd9\x85\x10\x86\x20\x96\x68\x29\xf5\xe9\xb3\x25\x5d\xe7\xcb\x10\x26\x55\x2d\xe6\x65\x15\x66\xe2\xe7\x9b\xf4\x85\xb5\x5a\xd3\xa9\x7c\xb8\x0f\x84\x31\xfa\x83\x8f\xc6\x9a\xae\xc0\x7e\xae\xc2\x7e\x43\x2d\x21\x32\x0d\xc1\x7f\x8d\xda\x31\x1d\xc2\xa5\x0f\x2f\x12\xe2\x9c\xb7\xcf\xff\xfc\xf3\xe2\x7e\x41\xe8\x6f\xad\xbe\x0b\x7f\xfe\xf9\xe7\x9f\xff\x
63\x7b\xfe\x6f\x7f\xfe\x19\x5f\x38\x94\xc2\x1d\x79\xe3\xa3\xdf\xf5\x32\x5a\x7d\x18\x27\x78\xbb\x30\xda\x91\x59\x04\xa8\x75\x8b\xd4\x0d\xa1\xf7\x75\xc9\xee\xdc\x67\xd3\x18\x30\x76\xed\x8d\x30\xd2\x4d\x52\xda\x72\x16\x4e\x65\x52\x7f\xd9\x6c\xb2\x3d\xa9\xad\xe0\xf5\x25\xfc\x47\xcd\x22\xac\xaf\xe1\x9f\xaa\xdf\x1f\xd7\x0d\xe9\xea\xe7\x47\x75\x43\xfa\xd7\xb5\xd8\x5b\x59\xd5\x62\xef\xab\x03\xf6\x47\x19\xd3\xef\x09\xb1\xe2\x59\xf2\x89\x70\xe0\xcc\x27\x66\xd4\xac\x74\xb4\x69\x9d\xd2\x77\xdf\x52\x21\x1d\x1f\x8b\x91\xb8\x75\xe0\xad\x59\x48\xfc\xab\xec\xed\xf3\x1f\x09\x1f\x9b\xee\x9d\x0e\xe1\x99\x6f\x97\xbe\xb7\x43\x43\xc3\x88\xd6\x16\x68\xc9\x57\x87\xc3\x29\x5e\x2c\x79\x33\xf2\xd2\x30\x42\x77\x6f\xaf\x43\x5b\x0f\xbb\x8f\xf7\x0e\x1e\x19\x09\xa5\x0e\x3b\x7d\xd5\xee\xee\x1d\x74\x1e\x1f\x78\x8a\x3e\xc0\xa7\x87\xf3\x8e\x99\xa5\xf6\xf5\xc3\xdf\xb4\x99\x7e\xa2\x4d\x04\x7e\x45\xb3\x44\x3c\xd8\x3d\xd8\xdf\xb1\xc6\x8a\x7d\xfd\xf8\x60\xde\xa1\xd4\xbc\x9e\x67\xd1\xb9\xf7\xfc\x77\x8f\x48\x26\xda\x64\xf7\x60\xff\xb7\xa4\x45\x92\xd4\xbc\x49\x52\xf3\x86\xd2\x36\x21\xdd\xfd\xdd\xdf\x88\x62\x64\xff\x37\xd9\xda\xa1\x0f\xba\xfb\xbb\xa6\x86\x1d\xfa\x60\xdf\xfc\xdb\x05\x3e\xf5\x04\x53\x2d\xa2\x0e\xbb\x9d\xfe\xae\xd7\x7e\x4c\xc1\xdf\xf5\x92\xd6\x5e\xa7\xf3\x9b\x6e\x91\x9d\x43\xd1\xef\x78\xdd\xb2\xa9\xa3\x79\x65\xfe\xc8\x18\xf6\x60\x46\x74\x0c\x22\x46\x2e\x25\x4e\xdb\x30\xb0\x8c\x61\x07\x56\x6d\xd6\xd5\xb7\xea\xba\xb5\xad\xba\x9d\xda\x56\xdd\x6e\x2d\x33\xc3\x5e\x2d\x8f\xc3\x7e\xed\x38\xfa\x41\xf5\xf6\xc3\xc6\xc3\xda\xcd\x83\x8f\x6a\x89\x00\x1e\xd7\xae\xca\xea\x76\xea\x57\x5f\x75\xbb\xf5\xdd\xc2\xee\xce\x62\x41\x66\xe4\x93\x9f\x36\xba\xda\xea\x19\x79\xef\x17\xbd\x71\x5a\x7a\xff\xa5\xf4\xde\x2b\xbd\xff\xb1\xe6\xbd\x2a\xf5\xaa\x9b\xbe\xdf\x85\x19\x79\x92\x57\x8c\xff\x6b\x39\xdf\x4a\x62\x41\xf0\x8a\x2a\x76\x65\x73\x98\x5a\x71\xef\x8f\xa8\x59\x86\x8a\x7d\x92\xfb\xe0\x95\x87\x72\xef\xa3\xb7\xa5\x9a\x4d\xbc\xdc\x7c\x3e\xd7\x7d\x7c\xde\xf1\x76\xbd\x2e\x3a\x44\x5d\xff\x5b\xd9\xd5\x54\x99\x11\x87\x9d\x54\x65\x
1d\xc7\x64\xe5\x44\xaf\x07\x13\x49\x34\xc3\x54\x29\x36\xa8\x71\x49\xd4\x03\x31\xef\xf4\x65\x8b\x7c\xd3\xf6\x99\xb6\x88\x6e\x39\x0d\x87\x52\x0f\x77\xd9\x42\xbb\x7d\xb0\xc0\x09\xec\x00\x0f\x8d\x21\x00\x83\x98\xe2\x49\x8f\xc2\xcf\x55\xa2\x6d\x2b\x77\x19\x34\x9b\x5b\x65\x9f\x41\xe6\x41\xb0\x4e\x83\x92\x13\xac\xdc\x30\x33\xad\xac\xdf\xc0\xa8\xc8\x93\x18\x24\xec\xd1\x76\xfa\xd4\xed\x74\x68\x2b\x7b\xdb\xe9\x94\x29\x88\xf8\xff\x1f\xd9\xbd\xf0\xdc\x95\x9b\x87\x71\x0f\x5d\xc3\x89\xf7\xd5\x66\x75\x6a\xcd\xea\xd6\x9a\xb5\x53\x6b\xd6\x6e\xad\x59\x7b\xb5\x66\xed\xd7\x9a\x75\x50\x6b\xd6\xc3\x5a\xb3\x1e\xd5\x5b\xf5\xb8\x7e\x55\x5d\xb7\xb3\xd4\xcc\xb2\xdf\x7d\x58\x9a\x45\x44\xb0\x19\x99\xe1\xb0\xa3\xcc\xc6\xdb\x9a\xa4\x99\x51\xf8\xda\xe8\x6c\x28\xb0\xf1\xbd\xce\xc1\x8d\xaa\xd4\xdd\x3b\xe8\xe2\x6b\x51\x81\xcd\x95\x77\x23\x67\x55\xab\xdb\xe9\xfc\x26\x5b\x7b\xbf\xe9\x96\x70\x79\x8b\x08\xd7\xef\x77\xbd\xb2\x5b\xca\x2f\x53\xa3\x98\x21\x8e\x29\x26\x29\xdc\x73\xed\xc9\x76\xc0\x89\x42\xc1\xad\xca\x01\xd3\xa3\xd8\xe8\x85\xbb\xa8\x1e\x72\x5d\xc2\x16\x97\xd9\xfc\xd1\xc1\x9e\x40\x46\x7f\xd8\x7d\xdc\x3d\x28\xdf\x13\x33\xe6\x95\x25\xfc\x4d\x0c\xdd\x5d\x2b\x77\x4a\x2e\x3d\x5e\x8f\x09\xef\x4f\x34\x71\x4e\xae\xa2\x64\x3c\xc0\x1b\x7c\x7d\xd1\x10\x93\xa9\xbe\x73\xa8\x37\x23\xaf\x63\xd8\xd6\xc4\xf9\x5d\x45\x72\xd4\x78\x7d\xf2\xf1\xd1\x41\xa7\xdb\x18\x46\x6a\xc2\xb5\x43\x61\x5a\x33\xee\x6f\xca\x04\xbc\x23\xd7\x11\xdc\x1f\x1b\x41\xb3\xd5\xa1\x70\x92\x3d\x9c\x66\x0f\x5f\xb2\x87\x57\xe6\xe1\xbd\x31\xa9\xde\x08\x10\x1c\xe5\x64\xd9\xa7\xb0\x5d\x46\x1b\xa0\xdc\x92\x94\x6e\xa2\xbb\xea\xa6\x9a\x54\x04\x6c\xf0\x0a\xcd\xa0\x8f\x56\xb8\x7d\xab\x86\x4a\xa6\xd3\x44\xd7\xa5\xc0\x25\xd9\xea\xc2\x56\xa7\x9e\x1e\xd4\xbe\xee\xd6\xd3\x08\x5d\x92\xad\x0e\x42\xd7\x18\xd6\xbe\x37\xab\x33\xa1\x98\x97\x31\x97\xea\xfe\xc8\xf4\x41\xe2\xfa\x14\xfc\x2b\x4f\x82\x1f\x79\xca\x08\x72\x51\xf6\x59\x56\xd7\x89\x2c\xb7\x56\xea\xfb\xf3\xbf\x63\x87\x7c\x1a\x0b\x43\x4a\x10\x4d\xa6\x63\xa1\x45\x83\x0f\x06\xa1\x1c\x61\x08\x1e\x9e\x9e\x32\x56\x
b6\xa7\x5c\xbf\x3f\xd3\xb6\xbf\x27\x78\x3f\xa8\x67\xca\xbe\xb7\x00\x71\x83\x2b\x81\x99\x32\x42\x25\x06\x65\x3f\xd6\xb4\xca\x60\xbf\x9b\x45\x4f\xc4\x70\x4b\x7e\xc4\xd0\xed\x1a\x2b\x2c\x06\xe4\xbb\x9d\x6c\xb9\x2b\x79\x49\x97\x99\xb3\x5b\x67\xce\x19\xdf\xb8\x1d\x73\x57\xed\x00\x6b\xce\xf2\xaf\x76\x20\x3f\xa3\xd7\x82\x9f\xa1\x41\x14\xa3\x41\x74\xd3\x2b\xb6\x01\x67\xe4\x6a\x0c\xdb\x46\x69\xe5\xa7\xb8\x1b\x98\xbd\x30\x26\xa4\x17\x40\x6c\x5f\xdc\x8c\x81\xe3\x66\x1f\xfe\xfa\x1c\x9b\xa9\xfd\x8d\x42\x82\xdb\x7e\x0a\xfc\xcf\xf6\xc3\x00\x23\xb1\x6a\x3b\x7f\xb7\xe5\x16\x1a\x2b\x92\x48\x86\x63\x46\xdd\xed\xfe\x64\xec\x99\xd6\x55\x9c\x44\xd7\x7c\xe3\xfe\xd1\xd1\xe6\xde\x38\x29\x57\x77\x35\x24\x47\xe3\xea\x64\x3c\xdd\x5c\xfc\x39\xdf\xec\x74\xdc\xfc\xf9\x7b\x45\xa3\xa8\x26\xd8\x29\x39\x1d\xab\x52\x10\x95\xf7\x33\xab\xbb\xcf\xc8\xe7\xb1\x8d\xb1\xc8\x6d\xb7\x3f\xff\xec\x5b\x0f\x90\xcb\x0f\x28\xa5\xae\xdf\xbf\xc4\x54\xf0\xca\xf5\xad\x03\x2d\x0b\x38\x55\xae\x8f\x31\x19\xd4\xbb\x24\xce\x03\x07\x8e\xf2\xf3\x47\x58\xc9\xf1\x18\x5e\x8c\xe1\x1d\x91\x70\xcf\x0f\x70\x2f\x37\xf1\x0c\x9e\xe0\xd4\x30\xfe\x82\xe2\x78\x7c\x8b\xcc\xbb\x92\x03\xb4\x44\x2a\x92\xdd\x5b\xca\x9c\x96\xad\x7d\x99\x6d\x66\xca\x57\x57\xc1\xd4\xac\x2a\x3e\xec\x78\x15\x73\xac\xf8\xb0\xeb\x95\x2f\xe9\x2f\x7f\xe9\x14\x5f\x0e\x2a\x1f\xf2\xa5\xf5\xc9\xb8\xb6\x96\x7e\x19\xd7\x16\xdb\x1f\xe3\xba\xe4\x79\x3f\x2e\x0d\xde\x1b\xbe\x71\x63\xf2\x1d\xdf\x6c\x87\x7e\xa8\x7e\x7f\x58\xfb\xfc\x8a\x6f\xb4\x33\xcf\xaa\x9f\x0f\x6a\x9f\x5f\x97\xc6\xe1\xa3\x2e\xed\x04\x76\xd3\x6b\x4d\xa4\x67\xcc\x23\x54\x49\xa9\x6b\x0c\xc1\x4f\x21\x44\x98\xd0\x99\x56\xef\xf5\xfd\x58\xb5\x7b\xf4\x00\x8e\xc9\xb9\x1c\x10\x87\x8f\x85\xd2\x0d\xfc\xdb\x9e\x71\x25\x43\x39\x72\xe8\x05\x35\x9f\x93\x01\x41\x02\xaa\x06\xe5\x71\x19\x93\x4a\x88\x36\x4b\x95\x3f\xc0\xb0\x4e\x54\xc7\x39\x7c\xf4\x3a\xa9\xde\xbd\xa8\x48\xbd\xaf\x55\x22\x06\x03\xc8\xee\x9d\x88\x45\x75\xba\xfe\xac\x42\xfe\x31\x06\xe7\x4a\x89\xa1\x03\x0f\xfe\xc7\x77\x7e\xc3\xed\xfd\x2f\xde\x83\xd0\xd5\x22\xd6\x84\x48\x
26\x69\x7e\xda\xf3\xc1\x9f\xf1\x83\x11\x38\x0e\xa5\x98\x74\xb8\xec\xaf\xa9\x62\x7d\x23\xea\xfb\xaf\x25\x67\x6d\x62\x7b\xb1\xee\xb2\x9d\x62\xc7\x21\x79\x70\x3b\x20\xce\xa5\x3f\xe6\xf2\xba\xd4\x63\x12\x7b\x0b\x5e\x9b\x09\x3a\x1b\xc0\x1d\x26\x73\x3b\xab\x74\xc3\xcb\x8a\xe4\x36\xba\x98\x2c\x07\x30\x9d\x0c\xe0\x0c\x70\x27\xe0\x28\x1f\x25\x2d\x6e\x75\x5b\x46\x33\xc5\xa7\xa5\xba\x54\xcb\xf1\xcc\x4f\x0a\x33\x72\x9d\xc3\xce\xda\xdd\x4e\x07\xa1\x9e\x2d\x8f\xde\x0b\xbe\x29\x22\x6c\x3e\x5f\x19\x00\x36\xcd\x71\xfb\x5a\x36\x7c\x2d\xdb\x51\xa2\xc7\xa1\x14\xed\x50\x0e\xa3\x86\x1f\xa9\x81\x50\xed\x8e\x43\x01\x3b\xc6\xd2\x37\x23\x71\x5e\x6c\xc8\x1b\x43\xde\xc6\x12\xc1\x15\x57\xba\x31\x51\xed\x1d\x24\xf1\x8c\x42\x32\x20\xce\x49\x94\xa8\x40\x60\x5b\x3c\xf3\xbb\xbc\xce\x7e\xde\xdc\x5d\xba\x4c\x5c\x1b\x7d\xcc\x88\xbe\x31\xf1\xd3\x3a\x90\x18\xb1\xd4\x86\x78\x92\x51\xae\xc2\xd1\x95\x6e\x77\x1a\xd8\xcb\xd6\xed\x6d\xba\xf4\x66\x00\x4e\x12\x0b\xd5\x8e\xc5\x58\x04\xda\x01\x27\x94\xa1\x0e\xf9\x38\xff\xda\x9e\x44\x3f\xdb\x7f\x01\x32\x13\xfe\x75\xa8\xff\x02\x2a\x25\x24\x88\xc6\x91\x72\xc0\xf9\xb7\x20\x08\x2a\xe3\xfc\x0f\x96\x6f\xf1\xdb\xe1\x1e\xae\x6c\xce\xa8\x3d\xe4\x03\x31\xa8\x0c\x51\x2c\x82\x48\x0e\xb8\xba\x73\x28\x7c\xe5\xe4\x98\x93\x4b\x8c\x02\xa3\x14\x26\x03\xe2\xbc\xc0\xcd\x86\x86\x7f\xd7\xd0\x57\x61\xdc\x18\x73\x5f\x8c\x4b\x55\x3b\x2d\x1c\x94\x0a\x0f\xbd\x2f\x6b\xcd\xff\xf6\x20\xdd\x6a\x88\x1f\x48\x31\xeb\xdb\xbd\x0b\xe6\xb4\xde\x87\xe4\x7d\x4d\x21\xfe\x52\x1a\xc8\xb3\x84\x48\xd7\x3f\x71\xfd\x8f\x95\xe8\x86\xbf\xc9\x6b\xef\x39\x59\x9a\xc6\xb9\x52\xf5\xc4\x6e\x2f\x77\xd2\xbd\x11\x5f\x9b\x1a\x71\x0f\x68\x1d\x83\xfa\x62\x3c\x6e\xc7\x63\x1e\x5f\xb5\xa3\x65\x16\xb5\xcd\xb4\x3c\x6a\xcf\x02\xc1\x26\x72\x07\x5c\x8e\x4c\xc7\x56\x08\x2e\x77\x97\xd3\x52\xbf\x40\xc9\x3a\x3a\x06\x48\x48\xa9\x77\x9f\x54\x67\xf6\xf7\x41\xed\xcc\xef\x8f\xea\x77\x51\x5a\x01\xc2\x91\x34\xac\x39\x6c\x07\x42\x1a\x66\xc8\xab\xb4\x3c\x70\x69\x44\xa1\xff\xb9\xc6\x05\x9f\x56\x08\xf3\x2b\xd3\x66\x07\xf0\xb6\x2c\x09\xc7\x83\x32\x
fc\xdb\xcd\x1a\xd8\xef\x7c\xa3\x3b\xf8\x8f\xd5\x56\x8b\x2c\x6f\x08\xa6\x76\xc3\x96\x9e\xcf\xf1\xe4\x2c\x5a\x0f\x3b\x36\x71\x3d\xde\xb0\x94\x1b\x12\x95\xa8\xa7\xa0\xd4\x8e\x55\x0c\x55\xf1\x45\x49\xd7\x1f\xa5\x2e\x29\x5d\xf5\x45\x65\x1c\xe7\x47\xb0\xce\x17\x85\x8e\xdd\x45\x35\xd0\x43\x95\xab\xbf\x24\x12\xd3\x7b\x14\xee\xca\xa0\xba\x99\x3e\x30\xa2\x63\x9a\xe8\xb4\x8f\x55\x90\xf5\xf5\xdb\x41\x85\x19\x44\xb0\x34\x38\xd7\xe2\x6e\x10\xcd\x64\x3e\x3a\xbf\x57\x46\x27\x59\x59\x20\x99\xae\x01\xe7\xc1\x1a\x75\x22\x88\xc6\x8d\x20\x1a\xb7\x79\xa2\xa3\x42\xf8\xfe\xa2\x8c\x1e\x6e\x9c\xfb\x56\x7e\xcd\xc8\xcb\x01\x6c\x61\xb4\x4c\xce\xa0\xb8\x41\xbc\x56\x30\x56\x27\x64\x19\x4b\xb7\x82\xc5\xf9\x7f\xfe\xef\x4c\xd6\x55\x18\x3d\x08\x36\x6a\x78\x51\xb0\x51\x7f\x0c\x97\x3b\x76\x12\x25\xb1\xc0\x99\xb6\xac\xf8\x0c\xd7\x80\x8f\x05\xbf\x11\xcb\xe0\x7e\xb0\x51\x7f\x8c\x83\x8d\x36\xd5\x38\xd8\x38\xe7\x06\xc1\xc6\x19\x7b\xb3\x4c\xaa\x3f\x4e\x56\xb4\x69\xfb\x7f\x23\xb3\x04\x37\x75\x66\x91\x7f\x93\x51\x0c\x86\x5f\x63\x94\xc9\xe6\xee\xbc\xda\xcc\x28\xd3\xcd\x63\x35\x0a\x36\x46\xb4\xcc\x36\x33\xc2\xdd\x66\xe4\xb7\xc1\x46\xfb\xf6\xba\x56\x77\xfd\xfb\x51\xf0\xbf\xd6\x67\x5c\x33\xe7\x1e\x96\xfd\x00\x41\xdd\xb1\xb2\x53\xf7\xab\x9c\xae\x63\xc7\x01\xd7\xa2\xa2\x03\xd6\x94\xfa\xc2\x5b\x10\xe4\xeb\x02\x1e\x1d\x9c\x0e\x61\x18\x92\x0e\x1a\xeb\x53\xba\x4a\x35\x0d\xf8\x58\x18\x25\xec\xb2\x31\x89\xa4\xbe\xca\x50\x13\xc5\x34\x94\xe1\x54\x34\xb3\x10\xed\x2b\xcc\x7e\xb4\x6a\x56\x4c\x95\xb8\x69\x23\x50\x63\xd0\x1e\x8e\xc5\x6d\xba\x6c\x5b\x86\xfd\x79\x93\x17\x79\x79\x93\x2f\xf1\x4a\x45\x33\x67\xad\xb6\xc1\xe5\x68\x2c\xda\x63\x31\xd4\xe6\xd7\xee\x6d\x23\x48\x54\x1c\xa9\xf6\x34\x0a\x2d\x62\xd4\x3e\x2e\x32\x11\x5b\x90\x62\x49\x35\x5d\x56\x27\x25\xef\xba\x6f\xc6\x10\xb2\xfb\x6b\x69\x14\xd3\xb3\x1b\x38\x4b\x95\x99\x5f\xd8\x61\x73\xde\x70\x99\x18\xed\xb5\xca\x45\xce\x0b\xe1\xab\xd2\xfb\x8c\x9b\x9c\xf7\x5c\x05\x57\x4e\x95\xa5\x9c\xa3\xa9\x0a\xc7\x4e\x95\xaf\x9c\xf7\x3c\x2b\xbc\x9f\xd7\x95\x48\xe1\x54\x1d\x0c\xce\x9b\x64\x9c\x
c1\x3d\xcc\xf1\x25\xa3\x24\xd6\x4e\xd5\xf3\xe0\x9c\x88\xa9\x16\x13\x5f\x28\xa7\xea\xce\x77\x3e\x06\x3a\x2a\x5e\xe7\x5e\x7d\xe7\x43\x74\x93\xc2\x57\x39\xda\x79\x26\x02\xfb\xa1\xb4\x51\xa7\xe8\x72\xff\x4b\x64\xd6\x75\xac\xf0\xf5\x3f\xcd\x0a\x68\x10\xfd\x35\x2f\x5c\xd8\x01\x3d\xbe\xc1\xac\x1e\x95\x29\xf2\xad\x3a\xc9\x42\x05\xce\x84\xdf\xda\xf3\x77\x0e\xd4\x43\x65\xbf\x07\x15\x07\xda\x9b\x3e\x36\xf2\xac\xce\xf9\x93\x68\xc0\xc7\x0d\x63\xe2\x34\xe2\x2b\xd3\x8a\xd4\x7a\x1a\x84\xf1\x74\xcc\xef\x1c\xb3\x00\x45\xc1\xf5\xaa\x49\x83\x45\xdb\x83\x90\x8f\xa3\x51\xa3\xfc\x23\xed\xb1\x62\xba\x2f\x97\x0a\x6c\x02\xb2\xf5\x00\xf5\xb9\x5a\x2c\x2d\xc1\x38\x8a\x45\x63\x92\x2d\x71\x66\x54\x6e\x03\xf2\xfc\xa6\xbc\x9a\xdc\x3a\xab\xa7\x96\xc1\x6c\x73\x41\x67\x78\x83\x01\x5c\x07\x80\xd1\x2e\x37\x15\x60\x34\xf9\x1b\x86\x4e\x1e\xca\x82\x12\x94\x53\x15\xc0\xa9\x6e\xef\x35\x8c\x90\xf9\x9e\xc4\x3a\x1c\xde\x65\x6d\xab\xcd\xdb\x19\x79\x62\xc6\xb4\x63\x0a\xe3\x53\x37\x1b\xee\x35\x74\x0e\xa3\x48\xaf\xee\x81\xc9\xb8\xbd\xd3\xa8\xaf\xb0\x71\x12\x04\x22\x8e\xcd\xb2\xbe\xa1\x63\x9e\x72\x19\x58\x63\xb4\xba\x5e\x57\x50\x4e\x55\x38\xc9\x8d\xdb\xdb\x80\x7c\xab\xa0\x38\x11\xba\xf1\x8c\x6b\xf1\xe0\x34\x9c\x88\xd2\xa2\xbd\xbe\xc3\x79\x70\x3d\x50\xd1\xb4\xcc\x65\x19\xc7\x7b\x19\xb8\xe5\xba\x60\x1c\x4e\x1d\x70\x94\x08\x34\xe9\xe0\xa5\x17\x1d\x9a\xb3\xe4\x34\x8a\x43\xbc\x10\x13\x9c\x61\x78\xbb\x81\xbb\xb0\xa2\xcc\xbe\xfb\x0b\x7a\x0a\x52\x4a\x5e\xe5\xcd\x2b\xfb\xe5\x66\x1d\xee\xcd\x66\xad\xe2\xdd\x66\xbd\xe0\xc3\xf2\x24\x57\xd1\x2c\x5e\x9e\xdf\xaf\x36\xe3\x39\x0b\xca\xb1\x28\xdf\x35\x1e\x12\x3a\x26\xe7\x25\x6b\xd9\x01\xdc\xe3\x70\xc4\x20\xd4\x8e\x19\x3a\x74\xa0\xff\x1d\x5f\x81\x2a\xb1\xc5\x73\x83\xc5\xf4\x63\x11\x4a\xea\x9f\xb8\xfe\x71\x5f\x79\x7f\x13\xeb\x8c\xcc\xb6\x0d\x69\xd7\x76\x2f\xa5\x54\xc7\xb1\xb0\x37\x2d\x38\xd5\xf1\x7a\x5d\x6a\x2b\x06\x36\xa8\x22\xb0\x41\x95\x3d\x8d\xdf\x35\x6c\x19\xa3\xb3\xd9\x54\x7d\x87\x39\x9e\x79\x9e\xcf\x55\xbf\x78\xf7\x1f\x8e\xe7\x6c\xe1\x1f\x86\xdd\x65\x23\xc0\x5c\xff\x5b\xa5\xc2\x
8f\x9b\x2b\x04\xc5\x3e\x0b\x72\xcd\x6b\x4e\x5c\x1b\x53\x91\xd5\xd5\x59\xaa\xbd\x1a\x5c\x51\x8a\xe7\x9c\x91\x1f\x37\x70\x8d\x89\x94\xb1\xa9\x65\xcf\x71\x90\xba\x59\xab\x39\x57\x1a\xff\xe7\x4c\xab\x22\xf9\x8b\xeb\xff\xb0\x1b\x6a\x6e\x60\x03\x3f\xb8\xcb\xf7\xd0\xdd\xc0\xdd\xe0\xa0\xa2\xe2\xfd\x7f\x76\x99\xba\xdb\xae\x7e\xc7\x9b\x03\x9c\x4d\x96\x12\x2e\x67\x56\xd4\xaa\x95\xf6\xd0\x5f\x2c\x61\xfa\x57\x56\x0f\xf1\x5f\x10\xd1\xa5\xbd\x86\xcd\xd6\xd6\xcf\x55\x26\x4b\xae\x5c\xa6\xc1\xcd\x35\xd5\x72\x2a\xe4\x20\x94\xa3\x25\x6d\x4d\xdc\x4e\x71\x6f\xb8\x84\xfe\x59\x55\x32\x3e\x5f\xc1\x25\xc5\x38\x7c\xdf\xae\xbf\x33\x9d\xfa\x81\x4f\x84\xd7\xb0\xab\x1f\x6e\x46\xf8\xd1\x52\x87\xfc\x15\x8e\xa3\xc1\x40\x89\x38\xae\xa0\xe1\xaf\x96\x0c\xd7\x97\x41\xc5\xcf\x15\x59\x3f\xd7\x89\xdd\x4a\x8e\x7b\x65\x9f\x61\xda\x8c\x37\xdb\xf6\x09\x97\xe9\x71\x12\x6b\xa1\x1a\x27\x98\x2c\xd7\xd6\x54\x0a\x22\xc0\xc4\x25\xe5\x6d\x15\xc7\xb1\xf3\x0c\x77\xad\x56\x19\x4b\xc3\x48\x4d\x52\xbb\xbf\xa2\xab\x16\x6d\x0c\xa2\x71\x3b\x9e\x54\x9c\x94\xb6\xbf\x9c\xa5\x2e\x4a\x41\xbb\x9d\x3a\x63\x9b\x1e\x20\x55\xd8\xff\x5c\xc5\xb6\xdd\xbf\x52\x75\xe6\x75\x25\x92\xd9\x30\x50\x9f\x0f\x46\xc2\x81\xad\x4e\xa5\xc7\xd6\x87\x5d\xd8\x02\x99\xfa\xe4\xac\x8c\xc1\x48\x61\xb2\xbd\xbc\xd5\x91\x17\x29\x50\xea\xe9\x40\x9f\xe3\x82\x18\x2a\xe5\x20\xdd\x58\xce\xa2\x77\x13\x63\xa0\x5e\x0d\xe1\x75\x92\x46\xec\xe5\x0d\xff\x15\xe3\x4d\x09\x3e\x58\x32\xdd\x62\xa1\xcd\x7a\xba\x3c\x91\x06\x61\xcc\xfd\x31\xce\x24\xa2\xcb\x42\xa0\xca\x4e\x62\x0d\x3b\x89\x7f\x25\x3b\x7d\x12\x42\x15\x83\x7a\xb5\x66\x50\xb1\x8b\x9e\x05\x20\x33\x7e\xaa\x6e\xb6\x2d\x79\x1a\xd2\xe9\x33\xbb\x29\xa6\x4f\x79\xda\xfc\xd7\xc9\xfe\x32\xd5\xe1\x2f\xcf\x83\x0f\xb1\x11\x2b\x7f\xe4\xdd\x6c\xe4\x80\xcb\x8f\x69\x1a\x4e\xbe\x0d\xeb\xa6\xfc\x57\xa1\x62\x54\x66\xa4\x8d\x7e\x32\x2b\xf2\xbf\xa8\x01\x4f\x14\x97\xc1\xd5\x2f\x36\x40\xb9\xfc\xc3\xaa\x85\xe2\x3f\x59\x75\x12\x8e\x07\xc6\x5a\xf8\xf5\xda\x3f\xfe\x8b\x6b\xff\x12\x0b\xf5\xeb\xb5\x7f\xfa\xd7\xd5\xfe\x32\x4a\xc7\xf4\xd7\x6b\x7f\xfc\x
af\xab\xfd\x58\xdc\x84\x7f\xab\x72\xff\xc5\xbf\xae\xf2\xbf\xdb\x70\xff\xb2\xec\xfe\x40\x6b\x9c\x7f\x71\x7d\x05\xe5\x19\x5e\x59\x20\x23\x39\x0c\x47\x19\xfa\x0f\x39\x31\xd3\xf6\x5e\xb1\x01\xcc\x83\x6b\x43\xb9\x1c\x38\xe0\xfc\xdb\xf0\xe1\xf0\xe1\xf0\x71\xfe\x71\x18\x49\xdd\x1e\xf2\x49\x38\x36\xca\xe3\x24\x92\x51\x3c\xe5\x81\x28\x1a\xf8\xae\xa8\x4d\x96\x88\xbb\x58\x3e\xb0\xf1\xb9\xec\xba\xb4\xc7\x95\x98\x74\x83\xb3\x2c\xc0\x87\xe7\x01\x3e\x79\x40\x0d\xfa\x1d\xde\x70\xc0\x33\xab\xc1\x37\x58\xad\xf4\xca\xa8\x3d\x4a\xb4\x16\x2a\x2e\xc8\xfa\x88\x5f\xaf\x6f\x88\x33\x0c\xc5\x78\x10\x0b\x5d\xee\xf7\x17\xa1\x8a\x75\x63\xc0\xef\x1a\xd1\x10\x43\xf2\x66\x42\x5c\xe7\xa3\x70\x8b\x1e\xab\xf7\x83\xe5\xb2\x77\xe4\xe8\x06\x9c\xf7\x91\x1c\x18\x5d\x7a\x2b\x71\x83\x5b\xe0\x9a\x82\x7d\x7f\x92\xd8\xf7\x5d\x7b\x4f\x1d\x7e\xb2\x18\xe5\x4d\x85\x4f\xd0\x6b\xb9\xd2\xdb\xeb\x7c\x88\xb4\xf0\x1a\xa7\x57\x61\xdc\x30\x6b\x55\x28\x47\x0d\xf3\xc8\x6f\x6c\xc2\xc3\x71\x14\xf0\x71\x23\xd6\x91\xe2\x23\x61\x88\xbf\x8b\x12\xd5\xf0\x8d\xe9\x6b\xb5\xd8\xdc\x59\x52\x8b\x55\x9a\x91\x70\x00\x2f\x02\xb0\xd6\xe6\x69\xfd\x2a\xba\x95\x9b\x8e\xef\xad\x19\xf2\xc6\x1a\x21\xcf\x30\xae\xcf\x7f\xbb\x1c\x8f\xb5\xeb\xd9\xf4\x51\xaa\x9c\xcd\xe8\x72\x1b\x04\x68\xe0\xb0\xb5\x95\xe4\x37\xbf\x57\x15\x06\x3e\xa8\x6b\x07\x1f\xb9\x4d\x21\xb0\x20\xd2\x0d\xba\xf5\x0b\xe3\x67\x64\x74\x63\x1a\xb0\x0f\xdc\xe5\xf5\x5b\x29\xb5\xb5\x2e\x83\x0e\x75\xf9\xb6\x3d\xaa\xfc\x15\xf0\xc6\xb4\x3b\x1b\x31\xff\x76\x0d\xf7\x4c\x7c\x33\x17\xaa\x7c\x93\xbe\xdf\x71\x28\x58\x16\x1a\x6b\xa1\xda\x3e\x57\xed\x3c\x76\xb3\xcc\x4c\xe3\xd4\x84\x30\x43\x1d\x0c\xd2\x03\x95\x1c\x3e\x0b\x9c\xd3\x37\x70\x1f\x5c\xe2\x69\x2a\xab\x58\xc0\x1d\xb9\xd9\x86\xd3\x6d\xcc\xcc\x05\xb7\x64\x7b\x1b\xae\xb7\xc1\xde\x97\x7b\x41\x69\x6d\x4b\x02\x31\xbe\x10\x70\x4b\x06\xdb\x70\xa4\x31\x9f\x90\x74\x03\x63\xf4\x07\xdb\xb4\x1e\xf0\x5a\x81\xbe\xd6\x84\x63\x34\xd6\x00\x1c\xa7\x02\xff\x70\xfd\x10\xdc\xdd\x94\xd3\x91\x54\xb5\x88\x57\xb9\xfc\x90\xfc\xc6\x37\x8a\x0f\xfe\xd3\xd6\xd1\x68\x34\x16\x
46\x7f\x6a\x4f\x06\xd9\xcb\x31\x3a\x72\xf3\xb8\x90\x89\xdf\xde\x6f\x4c\x75\x7b\xb7\x31\xf5\xdb\xbb\xf5\xe8\x13\x3f\xd2\x3a\x9a\x38\xe0\x74\xa7\xb7\x8d\x38\x1a\x87\x83\x86\x1a\xf9\x9c\x74\xa0\x61\xff\x73\xbb\x3b\xfb\xb4\x18\xa6\xb3\x92\x58\xad\x79\x1e\xcb\x5e\x94\x94\x14\x5f\x71\x39\xc8\xa2\x20\x2a\x46\xca\x58\x28\x3d\xe1\x92\x8f\x8a\x01\xbc\xaa\x97\x96\xfc\xa6\x50\xb8\x5e\x6e\x13\x49\xe1\xc5\x36\x5d\xa5\x36\x17\xf9\xf4\x76\xbc\xea\x28\xa6\xca\x62\xad\xaf\x97\x16\x90\x50\x8e\x43\x59\x72\xda\x2e\xb7\x68\xcd\x86\x63\x2d\xbe\x43\x8a\x59\x45\xa8\x88\x59\xa3\x1c\x43\x62\x34\x45\xab\x30\x56\x74\xc6\x2f\x35\x8f\x5c\x7d\xbb\xf9\x49\xed\x7b\xfd\x60\xdb\x8f\xda\xf7\x7a\xc4\xe1\xa7\x32\x37\xfd\x21\xc9\x50\x55\x9d\x34\x6f\x83\x4a\x1a\xa7\xca\x64\x4d\x9f\xbe\x6c\x03\x86\x1c\x06\xaf\x69\xcb\x19\x87\xfe\x03\x3f\x8a\x74\xac\x15\x9f\xb6\xf7\xdc\x8e\xdb\x69\xf3\xf1\xf4\x8a\xbb\x07\xed\x41\x18\xeb\x07\x41\x1c\x17\x00\xee\x24\x94\x6e\x60\x4c\x97\x2f\x81\x19\xcc\x2f\xdb\x20\x2d\x0e\x5c\xe3\xf8\x4c\xc4\xd1\x44\xb4\xf7\xdc\x87\x6e\x07\x4b\x96\x5f\x17\x85\x7f\xd4\x0a\x8b\xf1\xa4\x3d\xe0\x5a\x4c\xc3\xe0\x5a\x28\x2c\x58\x7d\x65\x8b\x3d\x09\xea\xd6\x84\x35\x1c\xde\x10\xe5\x06\x78\x06\xf4\xce\xfc\x91\xb4\x97\x67\x17\xbe\x57\xf9\x93\x5e\xca\x38\xcc\x4b\xd2\x37\x7f\xe9\xaf\x7a\x19\x94\x5e\x2e\xf5\xed\xfb\xc0\xd4\x7f\x56\xd7\x3d\xd2\x29\x65\xa6\x6a\x2e\x1b\x3f\x07\xe8\x78\x29\x44\xd4\x8e\x87\xd7\x39\x34\x54\xce\xd6\xf6\xb7\x5e\x54\x3e\xcb\xfc\x73\x38\x24\x3b\xe8\x7e\x33\x24\x65\x65\x6b\x65\x8a\x44\x38\xdb\x36\x93\xbd\x91\xf4\x4b\x08\xfc\x1c\x01\x1e\x15\x5e\x8f\xb5\x32\xfd\x16\x75\xe4\x41\x29\x5d\x4d\xfe\xd2\xc7\x2b\x63\x72\x05\xc6\xa6\xea\xff\x3d\x80\x3f\x02\x90\x11\xa8\x08\x74\x04\x22\x82\x24\x02\x1e\x41\x10\xb1\xaf\x92\x38\xa7\x3c\xbe\x76\x28\x44\xd1\xba\x94\x5c\x41\x44\xf2\xac\x5c\x69\xf2\xae\xfa\x1d\x54\xc8\xd6\x9f\x71\xc9\x0a\x8e\xa1\x72\x33\x1e\x5e\x0f\x43\x3e\x28\xbc\xe5\x2e\x3f\xf9\xa5\xc8\x3d\xbf\x2d\x2e\x92\x11\xa5\x3b\xd4\x38\x13\xf0\x5a\xad\xba\xb0\x2a\x9a\xc6\x9a\x6b\xe1\x80\xa6\xf0\x
1f\xaf\x95\x2b\xf9\x4d\x38\xe2\x3a\x52\x6e\x12\x0b\x75\x34\x12\x52\x17\x97\x27\x9d\xaa\x70\x80\x6e\xbd\x66\x73\x25\xb6\x2b\x1e\x5f\x65\x81\x57\x9a\xae\x3e\xbe\xd6\x53\x6e\xa0\xd5\xf8\xad\xb8\x9b\xcf\x95\x3b\x11\x9a\xa7\x8f\xf1\x55\x38\xd4\xf8\xdc\x3d\x34\xeb\x73\xa2\x75\x24\xe7\x73\xe9\x6a\xae\x46\x42\xe3\x91\xef\x68\x26\xc7\x11\x1f\xcc\xe7\x44\xb9\x53\x85\xd7\x3b\x3f\xb3\xbc\x40\x28\x2a\x27\x57\x4a\x0c\x41\x31\xd3\x35\x20\xd9\x73\x41\x34\x1e\x19\x22\x09\x91\xcd\xa6\x72\xfd\x3b\xcb\x2e\x77\xf8\x23\xb0\x3f\x02\xfc\x91\xb8\xdc\xfe\x4c\x5c\xde\xcf\x37\x06\xbc\x74\x27\x42\x2f\xec\xd1\x17\x08\x9e\x79\x2b\x23\xfc\xf0\xb2\x56\x09\x58\xb1\xa6\x0b\x18\xf8\x9e\xbd\x8c\xf5\xb1\x87\x57\xb8\x06\x7b\xe6\xdf\x3d\x83\x22\x8c\xd2\xe4\xf9\x43\x7c\xd8\x5d\x80\x1f\xb1\x81\x84\x38\x62\x81\x84\x31\xbe\xec\x2c\xe0\x06\x1f\xda\x3b\x0b\xd8\x8e\xd8\x4d\x04\x93\x88\x6d\x47\x70\xb5\x8e\xa5\xee\xf9\x73\x6f\x12\x01\x7f\x81\x81\xf5\xaf\xbd\x71\x04\xfc\x8d\xf9\x1b\xfc\xf0\x14\xf0\x6d\xef\x23\x26\x94\xf3\xb9\x97\xa6\x71\xe3\x97\x9e\xe3\x40\x70\xe3\x9d\x01\xdf\xc5\x23\xde\xaf\x3c\x09\xc1\x1b\x83\xc5\x1f\x7b\x67\xe0\x4f\x30\x9f\xda\x73\x54\x55\xf0\xa3\x7f\xe4\xdd\xa7\xc5\xf0\x27\x42\x3d\x31\x7f\x9e\x63\x62\xb8\x37\x58\xe0\x1d\x16\xf0\xbf\x7a\x1d\xbc\xe1\x20\x6b\xcf\x68\x3d\xe5\xcf\x90\x66\xa4\x36\x27\x94\x77\xbc\x69\x04\xc1\xad\x21\x7e\xdf\xfb\x8a\x59\x5b\x6d\xad\x1f\xbc\x71\x84\x37\x1d\x44\xec\xde\x3f\x35\x3f\xe0\x2e\xfa\x85\xdc\xaf\x1d\x68\xa7\x39\x5c\x6f\x23\x96\x48\xb8\x8e\xd8\x3d\xbf\xc2\xae\xe0\x48\x38\xe6\x97\x3b\x36\x7f\x4e\xcc\x9f\x53\xf3\xe7\x8b\xf9\xf3\x0a\x53\xce\x1d\x61\x4b\x0e\x16\x70\x82\x0f\x3b\x0b\x38\xcd\x06\xf0\x79\xb4\xfe\x76\x86\x83\xf2\xed\x0c\xdf\xb2\xb1\xff\x8e\x0f\x8f\x16\xf0\x34\xc3\x7a\x19\x6d\xb8\x86\x91\x48\xa2\x51\x50\xbc\x89\xd8\xca\x1c\x87\xcb\x97\xaf\x4a\x7a\xaf\xd5\xdd\xfd\x6b\x95\xdf\x35\xcb\x54\x71\xc5\x53\xf9\x0a\x5a\x25\xcc\xa4\x22\x78\xfe\x89\xd2\x05\xbc\x8b\xd8\x33\x09\x1f\x22\xf6\x53\xc2\xab\x88\x7d\x88\xcc\x78\x9c\x45\xec\x95\x82\xd7\xeb\x89\xbc\xe7\x6f\x3c\x
0d\x7e\x68\x1a\xfb\xd3\xb6\xf6\xe3\xda\x61\x51\xae\xdf\xc7\x6c\x87\x91\xcd\xc0\x88\x49\x89\x8e\xd7\x83\xdb\x44\x89\x32\x4f\x92\xf8\x31\x82\x33\x3b\x96\xcf\x24\x8b\x56\x66\x19\x86\x00\x22\x08\x0b\xea\xde\x7a\x1a\xf8\x99\x17\x01\xef\x7a\x09\xf0\x87\x9e\x4a\x89\x7d\xe2\x09\xf0\x3f\x7b\x1c\xfc\x13\x2f\x04\xff\xcc\xc3\x44\xe7\xaf\xd4\xa6\x44\xe7\xbe\x30\x6d\x95\xa6\xe4\x47\x83\xe8\xd8\x28\x36\x14\xbe\x46\xab\xd3\x2e\x3f\x04\x1f\xd3\x2e\xff\x8c\x98\x2f\x48\x48\x21\xdc\x94\x30\xf2\x67\x04\x33\x12\xdb\x84\x6f\x36\x15\xe7\xb3\x88\x0d\x25\xbc\x8c\x36\xdf\x3a\x32\x94\x6c\x46\x9e\x45\x1b\x0e\x8b\x3b\x89\x9c\xaa\x28\x10\x71\x2c\x06\x4e\xb6\xb6\xc6\x82\xa4\xbe\xdb\x6c\xb3\xa1\xf4\x25\x35\xba\x9c\x38\x99\x4e\xd5\x52\xb9\x9d\x25\x15\xf6\x65\x44\x9c\x2f\xf2\x5a\x46\x33\xd9\xd0\x77\x53\xe1\x35\x9c\x96\xa4\x0b\x33\x7b\xb0\x4f\xef\x48\x08\x36\x9d\xcb\x93\x3b\x07\xbe\x46\xc4\xbc\xc7\x97\x79\x16\x98\xfa\x87\x2c\x7f\xcc\xd2\x7b\xbb\x70\x0d\x25\xc4\x82\xbc\x52\xe8\x11\x86\x17\x91\xad\xc2\x66\x35\x38\x8a\xcc\xb7\x15\x9d\xe6\x47\x38\x60\x14\x3e\x47\xbf\x78\xaf\xca\xfb\x0d\x9c\x5f\x4b\x7b\x6f\xc0\xbf\x44\xd5\xeb\x26\x65\x9e\xe5\x79\x7d\x16\x3a\x9e\xde\xff\x97\x9e\xa8\xa1\xe5\x24\x80\xe9\x79\xab\xf4\x42\x5b\x8c\xf8\x46\x05\x01\x24\x4b\x88\xf9\x50\x2a\x8b\x66\x47\x76\x8f\x43\xc9\x0f\xfd\x39\x02\x3d\x9f\x0b\x1b\x84\x5e\xf9\x86\x89\xb6\xb2\x6f\x46\x11\xa2\x78\xf3\x5b\xc4\xbe\x44\xf0\xe3\x57\x7b\xe8\x53\xb4\x69\xc6\xd8\x0c\x5e\xa1\x99\x31\x37\x36\x77\x17\xce\x98\xb7\xd9\xf2\xf0\x7b\xb4\x3e\x7d\xe9\x8f\x08\xde\x46\x70\x4b\x3e\x45\xa5\x44\x64\xa8\xb0\xa1\x4c\xfc\x23\x62\xe4\xf7\x00\x05\x6b\x67\x65\x1a\x45\x9b\xcb\xd0\x98\xbe\xa6\xe9\xfd\x2c\x25\x62\xc7\x08\x9e\x5a\xda\xb1\xdf\x2d\x4a\x19\xb2\x9f\x0a\x54\xf8\x17\x29\xe5\xed\x90\x62\xa2\x33\xf3\x35\x31\xca\x29\xe6\x4e\xe4\xd0\xee\xe6\xb9\x96\xb1\xbe\x53\x92\x60\x5a\x8e\xc3\x4e\x9a\xcf\x4c\x40\xe0\x25\x98\xcf\x2c\xc1\x7c\x66\xca\xf4\x89\x06\xee\x25\x2e\x5f\xd0\x5e\xc2\x88\x60\x88\x68\x87\xf6\x49\x9a\x76\xbe\xd5\x05\xcd\x74\xab\x0b\x5d\xea\xa5\xef\xb8\x
cd\x43\xdf\xea\x52\x48\x70\xd4\x7e\xaa\x55\x2b\xc4\x8a\x5e\xb9\x21\x2a\xb4\x1a\x6e\x39\xbd\x9b\x99\xab\x1a\x5b\x2f\xc2\xff\x66\x16\xf6\xd1\x2d\x13\x54\x98\xd0\x1e\x65\x48\xc0\x66\x6b\x37\x20\x48\x1e\xc7\x94\x42\x02\x1b\x98\x84\xab\x53\x4a\x8b\x10\xb6\xf1\x06\x14\xe0\x21\x7b\x02\x41\xc8\xc2\x04\xa2\x95\xc0\x1d\x3c\x99\x6b\x43\xc0\x9a\xcd\xad\x07\xe7\x7f\xc6\xb7\x7e\x74\xf1\xc0\x9e\xe1\x92\x78\x6d\x24\x6b\x49\xca\x98\xc4\x04\x61\x36\xd1\x74\x18\xb2\x55\x99\x32\x1f\x1f\xda\x6c\x95\xab\xf2\x5c\x72\xdd\x98\x44\xb1\x6e\x3c\xde\x98\xe6\x32\xdd\xeb\x8f\x42\xe2\x74\x5c\x23\x2f\xd7\x65\xd9\x1c\x8e\x23\xae\x6b\x39\x36\x79\x48\xba\x62\xf7\x37\x7b\x4f\x67\x39\x5d\x26\x0c\x43\xc6\xcd\x8a\xfc\x17\x69\xf8\x1b\xc3\x90\xc8\xd6\x41\xe7\x37\xf5\xdb\x41\xe7\xb7\xae\xd8\x35\xcf\x44\xb7\x39\xc5\x1f\x06\xb9\x68\xe1\x85\x7a\x71\x65\xd9\xe2\xc5\xad\x64\x01\x93\x10\x31\xb5\x9e\x2f\x82\xbf\x27\xda\x6c\x66\x41\xb3\xb2\x2b\x37\x58\x96\x6b\xc9\x2a\xb9\x96\x58\xb9\x66\x13\x7f\xa2\xeb\xaf\x2e\xdd\x12\x98\x11\x0e\x56\xfe\xd9\x3b\xd7\x61\x1c\xae\xbf\x61\x29\xb6\xfc\x64\xd5\x8c\x41\xc8\xc6\x21\xdc\x6c\x06\xff\x91\x83\x6f\x87\x4b\xb9\xb3\x53\x81\xbc\xbe\x8f\x44\xda\x47\x6b\xfb\x47\xae\x9c\x2d\x1a\xc5\x3f\x4e\x5c\x0a\x93\x90\x6d\x87\x70\x15\xfe\xe2\xad\x62\xd3\x90\x7d\x55\x30\x0a\x59\x9c\xc0\x2c\x64\x9f\xe1\x2e\x64\x27\xf0\x55\x2d\xf3\x79\xbe\x9c\x58\xaf\xaf\x75\xd5\xdb\x2b\x0a\xf2\x2b\x35\x06\x09\xe1\xa5\x8b\x04\x55\xd1\xb4\xdd\xee\xfe\xee\x81\x38\xf8\x8d\x88\x76\xf7\xf1\xc3\x8e\xb1\xd5\xd2\x6c\x09\x24\x39\xdc\x9d\xcf\xb7\x6e\x12\x22\x68\x9f\xb7\xbb\x1e\xa7\x2d\xb2\x6d\x7e\xb5\xb7\x13\x82\xc0\x45\x40\x4f\x60\x18\x55\xb5\x34\x5d\xa4\x6a\x4d\x52\xcf\x96\xb1\xdb\x3d\xe4\x7d\xa4\xc3\x53\x99\x56\x53\xba\xac\xe2\xf1\x21\x9f\xcf\x77\x1e\x33\xc6\x78\xb3\x99\x56\x9a\x41\xef\x1c\x3c\x7c\xb4\x27\xf6\xeb\xfe\xd6\x0a\xc6\xfd\xce\xe3\x87\x07\x39\x4c\x91\x73\xa3\x53\x82\x79\xf8\xf0\xe1\x81\x38\xa8\x3b\xd4\x2b\x68\xba\x9d\xdd\x83\x47\x39\xcc\xc1\x4a\x34\xdd\xdd\xce\xde\x41\x41\xcf\xc3\xd5\x88\xf6\x0f\x76\x4b\x44\x3f\x
5a\x0d\xf4\x68\xb7\x7b\xf0\x28\x07\x7a\xbc\xb2\xba\x9d\xce\xe3\xc7\xfb\x3b\x39\x50\x91\xee\xa3\x82\x6a\x67\x77\xff\xd1\xc3\x12\x54\x77\x35\xae\x83\x9d\x83\xfd\xa2\x9b\xba\x3b\xab\x71\x3d\x7a\xb4\x6f\x3b\xb3\xa6\x42\x96\x05\x1e\x06\x14\xa3\xc0\xfb\xa6\x49\x62\x33\x26\x2e\x16\x30\x23\x37\x61\xe9\x4f\x10\x92\x01\x79\x93\xe5\x91\x1c\x26\x64\x8f\xc2\x38\x21\x4e\xdb\xa1\xa5\x97\x3b\xe5\x97\xf8\x9b\x52\xb8\xdd\x30\x55\x76\xca\x53\xe5\x3a\xfc\xf5\xcb\xe6\x32\x9d\x42\xa5\xb7\x0c\x63\x4a\x3f\x33\x67\x48\x07\xb4\xcb\x33\x5d\x6d\xab\x74\x2b\x86\x60\x78\x55\x16\x11\x8c\x53\x37\xbf\xed\x43\xa4\xf7\xcd\xcc\xc8\xad\x59\xaa\x85\x99\xe8\xda\x5e\x33\x73\x14\xb2\x49\x02\x27\x1b\x0c\x09\xf9\x1b\x31\x42\xbd\x85\x16\xc4\xe9\x8a\xb5\xab\x96\xec\xb3\xd2\xa5\xe5\x3f\x36\x37\xa7\x6f\xf3\x04\x99\x2e\x3c\x75\x96\xbb\xd5\x5b\xd5\xd7\x5e\xa9\xaf\xa1\x52\x8f\xc5\xc9\xb5\x85\x73\x0d\xc2\x30\xa4\xe6\x5d\xc7\x6e\x66\x66\xd0\x93\x70\x4d\xf6\x7c\x53\xee\x9b\x25\xa4\xce\x0c\x27\xe1\x5f\x95\xef\xda\xf2\x2d\x5b\x7e\x25\x4c\x3b\x85\x31\xfc\x72\xf1\x17\x6d\x2b\xbe\x07\x21\x19\x12\xdf\xa8\x83\x9d\xec\xff\xd4\x68\xa8\xf6\x28\xf6\x57\x45\xe1\x79\xb8\xc1\x84\x36\x4a\x72\x45\x3f\xfe\x16\xfe\xad\xeb\xe6\xf2\xdb\x0e\x4b\xb7\xb9\x61\x04\x95\x3d\x3c\x8e\xac\x14\x69\xbc\xb7\xa3\x7c\x6b\x5b\x36\x03\xeb\xa0\xdf\xc2\x3a\xa8\xa1\xe9\xfb\x8a\x39\xc3\x24\x31\xd4\x77\x21\xf0\xce\x60\xe0\x75\xc1\xf7\x3a\xa6\x11\x98\x8f\x61\x51\x51\x68\x26\x9a\x20\x66\xb3\xc4\x9f\x51\x4c\x0a\x63\x56\xb0\x05\x85\xa7\xab\x10\xcf\xc8\x77\xd4\xae\x97\x71\xbc\x11\x70\x95\x00\x2a\x3e\x16\x09\x37\x48\x3e\x2f\x1b\xc1\x76\x11\xc2\xa4\x38\x65\x24\xc6\x56\x2d\x12\x67\xeb\xd4\x56\xf5\x62\x61\x51\xa1\xc9\x7a\xb9\xce\x04\xc8\xa5\x44\xbb\x5b\xbf\xf2\x0c\x84\xbd\xbd\xe8\x4d\xb8\xfe\xf6\x1d\xcc\x66\x99\xb8\xdb\xf3\xb9\x31\x1a\xf2\x37\xc2\xbc\x11\x2e\x4f\xb3\x60\xa6\x09\x43\x6d\xba\xd0\x72\xfa\xc8\x02\x59\x7a\x55\x8f\xc0\x64\xa1\x62\x29\xcb\xa4\x40\x15\x3a\xcf\x32\x99\xa5\xf1\xe4\xee\x00\x78\x29\xbf\xa4\xc1\x16\x40\x92\xa6\xb7\x8c\x98\xb1\x55\x42\x96\x60\x1e\x
d2\x04\xef\x78\x4d\x5c\xd1\xab\x53\x59\x23\x29\x82\x30\x4f\xa3\x89\xd4\x71\x9a\x11\x18\xd4\xc8\x32\xdf\xb3\x04\xa1\x29\x85\x79\x0b\x72\xa2\x0c\x3e\x8e\x44\x51\x78\x17\xb2\xef\xf0\x21\x5c\x7d\xc5\x4e\xfd\x76\xda\xb4\x12\x03\x70\x13\xc1\x4d\x44\xf3\xcb\x3f\x79\x2e\x8a\x35\x26\xe7\xd4\x98\x9c\x53\xbb\xa2\x97\xe7\xe1\x7a\x67\x9a\x92\xd0\xfa\x94\xba\x21\x6f\xc2\xd4\xb1\x74\x47\x3e\xd8\x11\x08\x28\x44\xf5\xa8\x37\xac\x3b\x4d\x7a\x6a\xbe\xd6\x16\xb7\x12\x9a\xa0\x40\x14\x51\xd4\xde\x5e\xad\x6a\x9f\x66\x39\x98\xbd\x2b\x08\x87\x20\x3b\xf1\x5b\xdc\x3e\x5a\x4d\x6d\x9a\x65\x17\x35\x0d\xd4\x2e\x5e\x50\x74\x16\xae\x71\x1f\xed\xa4\xde\xa9\x93\xd5\x76\xd5\x50\xc0\x28\x81\xb3\xd0\xde\xc3\xf2\x7a\x0d\x96\xc7\x10\x20\x92\x8f\xeb\x6a\xd9\x87\x11\x02\x7c\x55\xab\xaa\xf9\x88\xe2\xfa\x75\x48\x8e\x34\xb5\xe7\x7c\xaf\x8d\xd6\x8c\x52\xf3\x38\x34\xb6\xfd\xfe\x02\xbe\x56\xfa\xa7\xe2\x5f\x68\x2c\x4d\x7a\xc9\x66\xc4\x8f\xd0\xbb\x58\x4b\x7e\x1c\x0b\x22\xd2\x6c\x1a\x90\x42\xd9\xea\x35\xbc\x0e\x89\xc0\x8d\x39\x59\x8a\xc7\x47\x49\xf1\xd5\xe6\x89\x46\xf1\x80\xd9\x82\xe0\x38\x34\xbd\xfa\x7d\x83\xd3\xc3\xfa\xf2\xee\xc8\xd7\x30\x73\xe8\x1d\x87\x78\xa5\x54\x7a\xe5\xf2\xcf\x90\xe5\x9e\xac\x24\x76\xe0\x95\xb2\x9e\x2d\x21\x07\xf1\x91\x76\xe0\xb3\xfd\x99\x4c\x8d\x74\x1a\x94\xde\xc4\x9a\x2b\x5d\x06\x19\x86\x72\x24\xd4\x54\x85\x52\xa3\xd7\x0b\x5f\x66\x89\x8f\x63\xf4\x9b\xbd\xc8\xfc\x66\x5c\xca\x48\xa3\xe7\x37\x76\xe0\x04\xfd\x69\xb7\xe4\x3b\x38\x23\x21\x85\xe2\x3a\x52\x5f\x8e\xdf\x39\xf0\x55\xe1\x97\x23\x6d\x0b\x61\x32\x86\x1c\x3e\x16\xe4\x59\x9e\x34\x91\x52\x78\x96\x36\x04\x73\xb7\xd8\xea\x7e\x86\xb4\x4a\x85\x03\x2f\xa2\x35\xb8\x5e\xe3\x6d\x47\xf0\x72\xe5\xca\xc8\x24\xa9\xca\x7e\xed\xd9\x8b\x75\x8d\xed\x63\x7a\xf1\xc5\x06\xa3\xec\x52\xc0\xcb\x10\xd3\x9e\x58\xbb\xec\xf3\xfa\xdb\x20\x8b\x84\xe1\xc4\xda\xd2\x14\xde\xaf\x74\x1e\x08\x19\x44\x03\xf1\xe5\xf8\xf5\xd3\x68\x32\x8d\xa4\xc0\x5c\xfc\x0b\xf8\x62\x50\x5f\x52\x78\xb2\x61\x7d\xc7\x0d\x8c\x8f\xf6\xe0\x3f\xba\x09\x7f\x84\x69\xf8\x2d\xfb\x0f\x07\x76\x30\x23\xe3\x
d6\x7f\x38\xb0\x8b\x4f\xcc\x81\x8e\x7d\xc5\x1c\x3c\xf6\x04\x9f\x42\xf6\x09\xde\x86\x2b\x59\xae\xec\x93\x4a\x98\x26\x8a\x92\xec\x96\xb3\x6d\x7a\xcf\xcb\xb7\x9c\xa1\x7e\x2a\xe7\x73\x0e\x89\x59\x78\xed\x0a\x92\xb8\x1c\x02\x14\xfc\x46\xd8\xa3\x6d\x17\x14\x73\x06\xcd\x4d\x2c\x12\x18\x21\x8a\xd3\xc7\xfc\x52\x2c\x70\x53\x17\x94\xd5\x4b\x7f\x0f\xab\x77\x3c\xe9\x4d\x2e\xa8\x5b\xf2\x36\xb4\x97\x93\xe8\xd4\x80\xfd\x23\x5c\xef\x06\xfc\xdd\x48\xc1\x95\x22\x6a\x12\xc2\xb7\x04\x6c\xf8\x00\xba\xef\x86\xe8\x0d\xec\x2e\x40\x0d\xd9\x9a\xeb\xf4\x97\x15\x04\xf4\xe3\xe9\xd4\x21\xc6\x69\x71\xa7\x81\x5a\x75\xd3\x87\xa0\xd4\xc3\x4f\x3b\x99\x47\x31\xf3\xec\x75\x6b\x37\x15\x48\xd7\x6f\x75\x51\xa3\x73\xfd\x97\xad\x6e\x76\x65\x81\x57\x2d\x25\x5d\x7e\xd2\xaa\x17\x55\x59\xb1\xe2\xc2\x10\x0a\x1a\x5b\xf7\x70\x01\x62\xb8\x5e\x39\xcc\xd7\x45\x55\xb9\x7a\x2f\xf3\xb1\x04\xe8\x63\x19\xe0\xf6\x72\x7d\xf1\xc3\x34\x5e\xd9\xf2\x67\x0d\x10\xb1\xe2\xa6\xe0\xe2\x72\x3e\x5d\x2c\x73\x16\x3c\xa9\x68\x8c\x0b\x0a\xc9\x70\xcd\xb0\x6e\xcd\x88\x18\xa2\x7a\xe7\x6e\x2f\x28\xf0\xe1\xfa\xe1\x4f\x86\x99\x77\x25\x18\xb2\x64\xcd\x9e\x4f\x75\x1e\x44\x25\xdf\x6c\xea\x99\x8d\x4a\x9e\xd9\xcc\x0d\x6b\xfa\x9b\x57\x5c\xb0\x8b\x5e\xc0\x08\x67\x24\xf5\xc2\x46\x7f\xe5\x85\x8d\xca\x5e\x58\xa3\xf7\x58\x0f\x7a\x34\x64\xcb\xd6\xc9\x93\x90\x02\x51\x11\xbb\xe7\xdf\x3c\x3d\x84\xe0\xb9\x47\x64\xc4\xee\x83\xe7\xde\x75\x02\xc1\x57\xdc\x7f\xfd\xe4\x5d\x27\x0b\xea\x06\xcf\xcd\x0b\x19\xb9\xc1\x57\xf3\x4e\x46\xae\xff\x69\x01\x6b\x38\x57\x99\xaf\x05\xf7\x62\x6b\x55\xd9\x0f\x5d\xbf\x4d\x28\x72\xf9\x37\x9a\xb9\xdd\x32\x56\xf6\x49\x30\x34\x9f\x82\xe7\x60\xf9\x36\x65\xda\x6e\x9a\x5a\x7a\x80\x6e\x77\xaf\x04\xa6\x0a\xf7\xf1\xff\xcb\xdd\x9b\x68\xb7\x8d\x63\x0d\x83\xaf\xa2\xf0\xd3\xa4\x81\xf2\x95\x22\xd9\x4e\x52\x61\x8a\xbf\x8e\xb3\x3a\x8e\xb3\x78\x89\xe3\xb8\x3a\x9f\x0f\x48\x82\x36\x6d\x8a\x54\x40\x50\xb2\x9c\xe8\x5d\xe6\x59\xe6\xc9\xe6\xe0\x02\x5c\x44\x91\xb2\xab\xba\xbf\x9e\xef\x4c\x9f\xae\x98\x22\x16\x62\xb9\xb8\x1b\xee\xb2\xb1\x5c\x0b\xd4\xc8\x8c\x1e\x
57\xd6\x4e\xd5\x8c\xb0\x00\x04\x76\x71\xb2\x72\xb6\xcc\xd0\x8a\x13\xa2\xf3\x1a\xa2\x50\xd7\x1a\x13\xa5\x82\xdd\xb4\x1c\x86\x6a\xbe\x1d\x0c\xf9\xbf\xd0\x19\x17\x80\x7c\xf3\x9c\x7f\x58\xff\xd0\x22\x1f\x9a\xe5\x4d\x38\x19\x52\x98\x27\x64\x48\x29\x64\xaa\xfb\x4a\x66\x8b\x41\x9e\xf4\xe2\x9b\xa7\x05\xbc\x6f\x21\x7c\xf3\xe0\x63\x66\xc8\x5e\xd0\x0a\xad\x62\x41\xc1\x0d\xd6\x2a\x0b\x83\x02\x9c\xd3\x60\xad\x8c\xd1\x99\x11\x57\x2d\xd5\x38\xd3\x12\xad\x1b\x60\x2a\x41\x7c\x37\x23\xdd\x46\xf9\xf6\x75\x46\x90\x48\x22\x27\x8b\x86\x86\xb4\xb5\xee\x71\x46\xde\x49\x92\xe9\x24\x7b\xdf\x75\xc2\xb9\x28\x58\xc7\xd6\xe4\x23\xb9\xeb\xdb\x5c\xf5\x39\x23\x51\x88\x23\x8d\x42\x8c\xf0\x1e\x21\x8e\xa5\x77\x8f\x08\x5b\x87\x98\x7b\xe4\x3b\x0e\xca\x6f\x5b\x27\xc5\xc4\xdd\x63\x6e\xcf\xef\x5e\xcf\xfb\xce\xcc\xac\x2a\xc4\x68\x5b\xab\xff\x2c\x28\x4c\x97\x46\xb8\xaa\x69\xcf\xbb\xff\xd2\x78\x49\xcb\xea\x32\x07\x92\xbc\x43\xf2\x67\xfc\x5d\xb1\x68\x69\x50\xcc\xb8\x2e\x74\x2c\x57\xf4\xab\x15\x57\x4c\x0c\xa3\x50\x4f\x56\xea\x3f\x59\xf9\xab\xec\x63\x4e\xa2\x20\xef\x81\x52\xad\x2c\x07\xde\xbe\x61\xa7\x0b\x10\x66\x93\xba\x81\x73\x2b\x9a\xd2\xb7\x74\x4c\xaa\x22\xeb\x9f\x71\xf1\x24\x4c\x72\x18\xb8\x15\x4e\x8e\x1e\x99\x34\xaa\x14\x22\x15\x56\x4c\xec\xbd\x8c\xe4\x8f\xd6\xc2\x02\xef\xad\x9d\x04\xe0\x7d\xb3\x2d\xb0\xc0\xdb\xb4\xbb\x81\x42\x8d\xd6\x4f\x0b\xbc\xa7\xf6\x40\x21\xcd\x84\xaa\x4a\x32\xe9\x7b\x6f\x55\xbd\xbd\x8c\xa8\xe7\x6f\x54\xd5\x56\x4f\x9b\xaa\x81\x7e\xeb\x7e\xa6\xaa\xd9\x7d\xc2\xab\xaf\x66\x42\xae\x2d\xed\xe6\x62\x81\x1f\x7a\x8a\x40\xef\x06\x90\x16\x9f\xd0\x4b\x8c\x9f\x9e\x92\x69\x5e\xa2\x06\xaa\x5f\xea\xb1\xa6\xc5\x40\x75\x37\x54\x6b\x8e\x28\x8c\x15\x19\x39\x4f\xe0\x65\x48\x6e\x05\x85\xdd\x8c\xc2\x65\xf0\x77\x92\x73\x2a\x4e\x75\x64\x40\x58\x50\xcc\x55\x6e\x12\x74\x4e\x82\x75\x8c\x71\x8c\xe1\x2e\x29\x5c\x04\xe6\xb6\x76\x16\x38\x51\x0c\xf3\xa0\x5d\x79\xba\x5d\x55\x9e\xde\x04\xcd\x42\xa1\x89\x4b\x03\xd7\xc1\xfa\x74\xd5\x3b\x81\xb1\xb8\x39\x0a\x8c\xfd\xcc\x71\xd0\x9a\x07\xfb\x75\x53\x51\xc7\x5c\x20\x2d\xe0\x
6c\xed\x4c\xc7\x5a\x8f\x84\x91\x8a\x0b\x39\xe0\x6a\x1d\xbf\x91\x4b\xe4\x4c\x49\xe4\xac\xcf\x0a\x1e\xac\xa7\x3e\xe9\xf5\xbb\x0f\x1f\xea\x07\x66\x98\x32\xfd\xcb\x57\x5c\x79\x18\x10\xd5\xd4\x43\xfd\x4e\x6e\xc0\x68\x46\x73\xab\x28\xd7\xa2\xe9\x87\x58\x54\x65\x7d\xb9\xa4\xd4\xa9\x26\x7f\xe9\x73\xd0\xb7\xd9\x2f\x97\x57\xa4\x25\xf3\x4a\xa1\x3d\x49\x4a\xdf\x63\xed\x7e\x89\x06\xff\x1e\xde\xfc\xf8\x46\x21\xe4\x61\x46\x13\x33\xfe\xac\xa6\x0a\xd2\xeb\xf3\x32\x20\x99\x62\x84\x9e\xaf\xae\x90\xe6\xcd\x30\x97\x4e\x51\x9a\x2c\x8d\x20\x76\x92\xaa\x2e\x6b\x2f\x84\xa4\xcf\x8a\x3c\x33\x2f\x03\x92\xf4\x7d\xbc\xf4\x6a\xff\xee\x02\xce\x83\xa6\x3c\xc8\x75\xae\xd8\xcc\xb7\x7a\xf3\x88\x46\x97\xc0\x90\x43\xf6\x90\x43\x0e\x03\x72\x4c\x14\x45\xfb\x63\x50\xdb\x72\xfd\xb0\xbc\x00\x1a\x3c\x66\xe4\x3c\xc0\x4c\x66\x6a\x11\xf4\x94\x59\xdf\xaf\x4e\xf8\xd7\xaf\x24\x5f\x8e\x50\x2d\x87\x28\x97\x23\x5c\x19\x60\xb0\x9c\x96\x67\x2f\x84\xb0\xcf\x20\xec\xbb\x10\xf6\xbd\xfc\x63\xa1\x5a\x96\x80\x2e\xd6\x8f\xa6\x34\x72\xdd\x53\xef\x5c\x72\x15\xc0\x32\x58\xa3\xad\x59\xf3\xf2\xa9\xd1\xfd\x8d\xe5\xbb\xc0\xe5\x53\x02\x7d\x43\xb6\xe6\xbb\x60\x32\x46\x75\x8d\xbf\x58\x10\x2f\x27\xe3\xf9\x38\x46\xd3\xc4\xc6\xd5\x90\x45\xf2\x1e\xa6\x60\xc0\xab\xa6\xeb\xde\x0b\x8b\xb9\x99\x75\xd0\x53\xdc\x0f\x1a\x35\xce\xe7\x9a\x37\x7b\x5e\x99\x2e\x46\x28\xa0\x85\x7e\x7d\x59\xdb\x56\xcf\xc1\xb4\xa0\xf0\x31\x68\x54\x66\x08\x62\x04\x1e\x49\x6b\xd1\x26\xf7\xf5\x5b\x5b\x27\x23\x2c\xd5\x1b\xbb\xad\xb8\xd1\xb5\x31\x18\xab\x65\x90\xe4\x69\xe0\x30\x09\xef\xee\x60\x26\x4f\x03\xf2\xd3\x63\x4a\xec\x11\x36\x61\x4e\x06\xbb\x01\xa9\xab\xcd\x66\x01\xa0\x53\x49\xc9\xad\x9b\xad\x19\xcd\x4c\xb6\xb4\xb1\x2c\x14\x62\x8a\x2f\xf7\x6e\x6d\x01\xde\x07\x3b\x06\xef\xb1\xbd\x23\xc1\x67\xb6\x04\xdf\xb7\x1f\x0c\x17\x46\x41\xbd\xa0\xf0\xa9\x15\xf1\x4e\xc9\xbb\x00\xac\xb7\xaf\x8f\x2d\x45\x8e\xe0\x22\xd0\x7c\xf1\xa1\x21\x7b\xb3\x80\xa0\xfa\xcc\xe2\x42\x24\xc2\x82\x1d\x45\x13\x15\x11\x3c\x09\xd6\xdc\x7e\x11\x61\x4c\x26\x6f\x03\xe7\x2c\x86\x57\xc1\x5a\x5b\xec\xdb\x00\x66\x
e4\xc4\xfc\x73\xc5\x21\xa6\x30\x46\x46\x64\x9f\xaf\x14\xcc\x64\x91\x98\xf9\xed\x1a\x79\xf5\x55\x80\xfc\xee\x07\x01\x02\x76\x70\x28\xa4\x89\x2e\x99\x95\xf5\x32\xbd\xb2\x71\xd6\xe8\xcd\x5c\xdc\xf1\x14\xae\xcb\xc7\xe1\x98\x27\x99\xec\xf0\x1b\x8f\x73\x9f\xfb\xf5\x10\x01\x1f\xb9\x9c\x25\xe2\xba\xa3\x17\xed\x79\xc5\x33\xab\x26\x2d\x4d\x02\x38\xc3\x2b\x98\xa3\xbe\x77\x49\x37\xac\x8e\xb5\xa1\x7f\xec\xc3\x61\x40\x84\x02\x83\xda\x85\xb7\xf5\x25\xe6\xb9\x85\x8b\xe0\xe9\x24\x89\x53\xde\x09\x44\x32\xee\xb0\x49\x88\xb7\x28\x7d\x56\x77\xa8\xfe\xc0\xa2\x20\x11\x63\xee\x77\x32\x11\x99\x3a\xe8\xd7\xa5\x49\xec\x9b\xe6\x83\x58\x06\x60\x89\x4b\xed\xa2\xce\xde\x96\x5a\x1b\x87\x99\x76\x41\xc8\xe7\xf2\x56\x01\xca\x27\xc5\xfd\x9e\x24\xe4\x55\xa8\xb7\xe8\xe0\xfe\x3d\xaf\xef\xf0\xd6\x74\xf8\xc1\x80\x65\xc3\x66\xe6\xf5\xe3\x0d\xeb\xd1\x8a\xf6\x55\x9b\xd6\x92\x75\x8c\x0e\xa5\xf0\x65\x8d\xd4\xb8\xab\xef\x15\xf1\x70\xbc\xf8\x5b\x0c\xe0\x9c\x7c\x0c\x74\x76\x76\x9c\xc2\x41\x48\x12\x8c\x8f\x63\x84\xe1\x80\x68\x01\x80\xc2\xb5\xa4\xb4\x9a\x0a\x95\xc2\x8f\xd5\x81\x55\xaf\xbf\x45\x71\xfd\xfd\x60\x58\x5a\x78\xa1\xe6\x5d\xf3\x51\x79\xbe\x47\x73\xa5\x8d\xea\xcf\xcf\x41\x7b\x42\xf8\x1f\x41\xb3\x29\x1f\x0a\xcd\x7a\x48\xef\x03\x67\x1e\xc3\xd7\xc0\xf9\x10\x13\x6b\xc2\x45\x1a\xa6\xf2\xad\x82\x8d\xd7\x37\x13\x16\xfb\x3b\x51\x64\xc1\xfb\x80\xc2\xb7\x35\xa7\x74\xbf\x10\xc3\x63\xb7\xad\xd6\x3e\x11\xf0\xd3\x9b\xda\x35\x26\xaa\x16\x3b\xe6\x90\xfc\xa9\x01\x09\x6d\x4f\xbf\xaf\xc4\xeb\xce\x13\x41\x8e\x39\x30\x8e\x19\x35\x67\xe4\xa5\x04\x25\xc6\xc4\x94\xe2\x51\x60\x5a\x7d\x29\xdc\x3b\xb4\xd4\x71\x7e\xd9\x29\xd7\x8f\xd9\xbd\xb6\x8b\xd8\xfe\xc2\x85\x71\x00\x71\x7f\xd7\xe8\x48\xf9\x6a\xd3\x62\xdb\x6e\x91\x90\xb3\x4b\xa4\xe4\x47\x48\xc9\x8f\x91\x92\x1f\x42\xe2\x88\xfe\x2e\x84\xaa\x58\xe7\x62\xb9\x57\x36\xc8\xf5\x99\x20\x45\xff\xcb\x9d\xb9\x20\xb3\xb5\xb9\x20\xd9\xfa\x5c\x90\x5e\x5e\x6c\xf2\x59\xc2\x3c\x23\x49\x43\x76\x48\xb9\x94\x1d\x32\xc4\x1f\x5e\x96\xca\x64\x8c\x60\x85\x41\x2c\xf8\xe8\x5a\x12\x9d\x01\x18\x73\x1d\x7c\xaf\x
25\x90\xe4\x74\xb4\x4b\x62\x58\x4a\x22\xc9\xa9\xb6\x70\xcf\x5c\x25\xbe\x6c\x2f\x80\xb9\xce\xa1\x20\x56\x90\x78\x59\x6a\x51\xf0\x56\xf7\xa2\xfd\x72\x7f\x8f\xe8\xbd\xbd\xb2\xe3\x3e\x1b\x59\x96\x2d\xfa\xee\x15\xe0\x56\x7f\x0e\x0c\xc3\x2e\xfa\xee\x35\x1d\xa9\x7f\xed\x5d\x85\xda\xaf\xf3\x90\x37\x0b\xaa\xd6\x03\x89\x53\x57\x92\xcc\xa5\xc0\xdc\x66\x87\xd6\x52\xc9\x50\x1a\x11\x34\x8e\xe0\x93\x5a\xb7\xca\x20\x2e\x03\x98\x87\x44\xd1\x48\x35\x88\xbf\xfe\xc1\xad\xc6\x89\xaa\x7e\x86\xb0\x9b\xd4\x6d\xba\x4c\x25\xb6\xbf\x5c\xa9\x26\x9b\xef\x11\x61\x4a\x50\x2d\xeb\x1a\xe7\x8b\xd0\x6d\x64\xd2\x74\x8f\xcc\xc6\x9c\x8c\xde\x94\xf6\xdd\xd1\x05\x11\x50\x3f\xe0\xd4\x5c\x18\x99\x24\x83\x16\xe6\x6b\x51\xc0\x61\xd1\xd2\xe2\xe1\x9c\x48\xf8\x98\x68\xb5\xcf\x69\x02\xb2\xef\xed\xc2\x8c\x70\x57\xbb\x7e\x15\x0b\x93\x98\x85\x41\xd0\xeb\xb9\xf3\x1e\xba\x9a\x1b\xd3\x93\x05\x85\xc0\x5d\x87\x7d\xe3\x3f\x9c\x41\xc9\x9b\x2e\xa3\xe3\x8e\x50\x88\xa8\x37\xd4\x5e\xcf\x6a\xfa\xae\xbb\xde\x94\xa9\xda\x9b\xac\xf7\x26\xcd\x5d\xba\xe8\xb3\x6a\xbf\x18\x7c\x34\x51\xac\xa6\xc4\x25\x4e\x5b\x11\xd3\x3b\x49\xe6\xc4\xc5\xf9\xc3\xa9\x56\x36\xba\xcd\xd7\xfa\x83\x3f\x44\x6e\x78\x78\x8e\x37\x49\x8d\xbe\x90\x6a\x7c\xbc\xef\x56\x47\x58\xbc\xea\xbb\x05\xa2\xe6\x15\x09\xdb\x78\x05\xe6\x56\x84\xaa\x28\x67\xae\xf2\x2a\x06\xc8\xf4\x65\x1a\x2f\x41\xff\x90\xfc\x99\xf5\xf1\xde\xac\xcf\xbe\x17\x00\x8b\x91\x60\xdc\xfc\x8b\xb9\x45\x86\x0b\x9e\xc3\x96\x9a\xb2\xbe\xbe\x3f\xf3\x74\xf3\x45\xdd\x61\xb2\xda\xcf\xc3\x87\xc5\x63\xde\x69\x82\x9d\x86\x4e\x62\x92\xb7\xba\x28\x8c\xeb\xcf\x30\x60\xd5\x8f\xe1\x66\x24\x68\x4c\x9d\x48\x14\x14\xf1\x21\xcb\x1f\x3c\x18\xf2\xad\x3f\xe2\xd1\x8c\xa4\x2e\x88\xde\x36\x30\x25\x79\x44\x2e\xc4\x1b\x43\xf3\x1b\xf3\xef\xe7\x4b\x95\x8b\x52\x85\x0b\xe5\x21\xf9\x93\x9b\x59\x14\x4a\x40\x0a\xbe\xdb\xce\xbd\x44\xae\xb6\xc9\x40\x45\x69\xd3\x96\x17\x46\x1a\xab\x34\x53\x27\x4f\x96\x7d\x57\xdb\x59\xb9\xc0\x37\x14\x6f\x1d\xb8\xc0\x7b\x08\x83\xfa\x94\x74\x5b\x3f\xaf\x33\xd6\xc4\x68\x69\xa4\x6a\x8e\x5d\xe7\x3d\x5c\xb6\x93\x40\x
23\xca\xe6\x23\xf9\x2a\x74\x26\xe7\xc6\xb8\x08\xa6\xd2\x83\x0b\xa2\x53\x99\x3f\x7c\x78\x41\xb8\xe1\x52\x50\xb7\xb6\x66\x51\xf6\x24\xe0\xad\xb1\x59\x97\x0b\xb7\x49\xe4\xd6\x4c\x0f\x2d\x14\x8f\x7a\x90\x63\xf4\x89\xe3\xda\x35\xaa\xeb\x92\x53\x94\x57\x26\x2e\xcc\xc8\x47\x09\x43\x90\x14\xde\x9a\x7c\xdd\x99\x83\x96\xe6\x20\x1c\x9d\x68\x2e\x03\x49\x1f\x6d\xfe\x1a\x50\x3c\xbe\x33\x72\x98\xc0\x8c\x4c\x5d\x10\x5a\xc9\xdb\xdc\x5f\x86\xfd\x69\xcd\x7d\xec\x7c\x15\xea\xeb\x99\x13\x3f\xca\x14\xcd\x79\x24\xc1\xa4\x12\xca\x33\x49\x5f\xba\x84\x1b\xbe\x92\x4b\x88\x39\xe9\x3f\xd6\xaa\x64\xc0\xdc\x86\x37\x12\x39\xd9\xd2\xcc\x62\x34\x7c\xb4\xf5\x1b\xc9\x36\xe4\x06\x89\x7b\x82\x3e\x8a\xa9\x3d\x58\x50\x98\xad\x43\x56\x39\x72\xc8\x85\xf5\x07\xbc\xcf\xfa\xee\xaf\x5f\x75\x84\x90\x1f\x63\x06\xcb\xf7\xdc\x68\x7b\xa4\x9e\x73\xaf\x8d\x4c\x71\xaa\xb1\xe3\x81\x70\x38\x48\x67\x97\x48\x85\xf5\x99\xa2\xf1\x3c\x4a\xb9\xc6\x4a\x5f\x51\xb2\x2c\x3a\x37\x37\xa4\x20\x15\x4f\xbb\xa0\x30\x77\x9b\xf4\x8f\x27\x24\xa6\xfd\xab\x24\x8c\x91\x3c\xc0\x4d\x1b\x4c\x58\xb9\x72\x5c\xfd\x15\x8a\xbc\xeb\xdd\x1f\xc5\xf6\xdc\x25\x73\x32\x73\xf5\x2e\xe8\xbd\x35\x18\xf4\xba\x05\x83\x2e\x77\x46\x0b\x4d\x76\x03\x4c\xe5\xd3\xd9\xe8\x6f\x3e\xfe\x6d\xcc\xc9\x8c\xdc\x18\x1a\xf5\x1b\x19\xf6\x50\x13\xb1\x73\xcf\x31\x0f\xf2\x21\x0f\xed\x39\xb9\xd6\x98\x7e\x46\x2e\xdc\xc2\xdb\xf9\xc8\x75\x5e\xc1\xb1\xdb\x78\xc5\x94\x47\x1c\x2b\xf8\x9e\xa7\x65\xa8\x30\x5e\xa5\xa4\xbc\x42\x49\x35\xc1\xbe\xb4\xa5\x62\x01\xee\x4b\x55\x6b\xd1\x44\xe6\x24\xd4\x43\xdd\x27\x1c\x7e\xb2\x73\xd5\x99\x5a\x7e\xde\x67\xe7\xe0\x4d\xed\x5d\xc2\xfb\x1e\xc6\x6d\x41\x13\x32\x25\x24\xb9\x91\x7d\xba\x58\xe6\x93\xb0\xec\xf9\xba\x2e\xb3\xa2\xc7\x9c\x55\xca\xa8\x9a\xcc\xb4\xd6\x5f\x25\x69\x1d\x36\x77\xc7\x7a\x7a\x25\x0f\xb4\x5d\xab\xc1\xb6\x54\x0d\xed\xb7\x5b\xad\xf7\xb8\xde\xd3\xeb\x7a\x4f\x5b\xf5\x9e\xf6\xeb\x35\x0a\x23\x75\xe6\xe4\x93\x51\x53\x65\x98\x45\x74\x9f\x30\xf8\xe9\x46\xb6\x87\x01\x46\xce\xe9\xe8\xd4\x9e\x91\x2e\xc7\xc8\x6d\xfa\x05\xeb\xbb\x91\x8d\x88\x7b\x38\xd0\x
09\x89\x8e\x5c\xd8\x71\x75\x75\x28\xf1\x7c\xa3\x24\xf1\x19\xf5\x96\x6a\x85\x16\xe0\x66\x14\x52\xf5\x19\x6f\x4f\x5f\x4d\x55\x67\xbb\x12\x5b\x8e\x23\xc7\x97\x2b\xa0\x5e\xbb\xc6\x6e\xe2\xac\x0d\x90\xd1\x2f\x10\xbc\x2f\xfa\xd6\xe2\xca\x5d\xe7\xd0\xe6\xbd\xb4\x05\x78\xef\x55\xfd\xcf\xb6\x04\x6f\xcb\xe6\xaa\xd1\x4b\xd7\xf9\x22\xe0\xdc\x75\x1e\xf5\xff\xfb\x11\xec\xb9\xce\x0b\x41\x86\x8f\x06\x14\xf6\xef\x09\xef\x95\x18\x7b\x5b\x68\xe8\xc7\xfa\xdd\x11\xb7\xf5\xce\xbd\xb0\x11\xc1\x1e\x64\x20\x31\x14\x42\x75\x8b\xea\x00\xa3\x3d\xb5\x71\xdd\x13\x09\x6a\x6a\x16\x4a\xc4\xde\x17\xdb\xb2\x16\x90\xef\x06\xef\xbb\x2f\xd4\x3a\x16\xce\xdb\x88\x75\xcd\x01\x7a\x6d\x0e\x50\x2e\x25\x15\x07\xe8\xde\xa0\x55\x1e\x0b\xf6\x7c\x65\x7c\xd9\xdd\xc3\xcb\x81\xa5\x3d\x35\xb8\x9b\x2c\x14\x28\x65\x94\x9a\xb9\x50\xda\x08\xb9\xf9\x10\xd1\x3f\x7d\xcd\x31\x72\xf7\x56\x8f\x47\xd3\xf8\x8d\xd3\xbb\xe9\xae\x19\x27\xb9\xb7\x36\x22\xc5\xcc\x88\x0a\x19\x5d\x60\x30\xa6\x16\x48\x5d\x50\xf8\xb8\x5e\xa2\xcf\x53\x72\xe6\x96\x4e\xbb\x6e\x8b\xba\x37\x0f\xc1\xe4\x32\x14\xdc\xd9\x1e\x4a\xee\xac\x8b\xa2\xbb\xbb\x03\x81\xfa\xf5\x0e\x5c\x27\xdb\xb0\xfe\xeb\x51\xae\x5a\x4b\x1d\xee\x12\xb7\xc8\xb1\xb7\xaa\x76\xc4\xc9\x2b\x64\xfb\xce\xc8\x5b\xc5\x22\x0d\x70\xa3\x23\xbc\x35\xf2\x1b\x52\x77\x46\xb5\xf8\x50\xb1\x13\xa1\x95\x39\x22\x0f\x6f\xcf\x7e\x65\xf2\x8c\x62\x94\x47\x64\x48\x5a\x37\x3c\x5c\x80\x2b\xa9\x49\x47\x8a\x26\x9c\x6d\x79\x72\x4a\x4c\xfc\x2e\x81\x0b\xad\x62\xd5\xb0\x96\x69\x27\x65\x6b\x81\x8c\x9a\xab\xa0\xec\x45\x70\xc7\x47\xb1\xd4\x95\x70\x19\x90\x56\x44\xa5\x78\x0c\x44\x55\xc8\x26\x55\x9c\x97\xce\x89\x87\x74\x5b\xd4\x09\xcf\x39\x89\x12\x60\xab\xd1\x33\x43\x7c\xbb\x02\x29\x71\x46\x22\x35\x0b\x0c\x8b\x45\x9f\xaf\x6c\x8b\xaf\x46\xb0\x67\x47\xe0\x32\xdb\xef\xbb\xcb\x18\x42\x2d\xcc\xd4\x99\x91\xd8\x05\x09\x8c\x42\xd7\x99\x11\xa9\x9e\x93\x95\x9e\x96\xc3\x49\xc8\x3e\xbb\x1c\x05\x76\x88\x51\x25\xf0\x47\x98\xd8\x1e\xb0\xae\xdd\x55\x1f\x9a\x2a\xc8\x45\xda\x8c\x65\x33\xb2\x6f\x36\xf0\x1b\x07\x91\xa9\xd3\x71\x10\xa0\x6c\x48\xed\x
b2\xec\x6b\x5e\xf6\xc6\x94\x41\x59\x96\x65\x58\xf6\x21\x20\xbc\x4a\xad\x2b\x87\x59\xe8\x23\x27\xd4\x91\x4b\xc9\x3e\x91\xf0\x13\x13\xd7\xa2\x71\x6e\x19\xf1\xec\xe9\xda\x16\x47\x0d\x2d\x7e\x5f\xdb\xe2\xb8\xa1\xc5\xb0\x7e\x3a\xdc\x93\xfa\xe9\xd8\xc6\xa5\x1f\x3b\x33\xe2\xa1\x1d\x14\x24\x6a\xf1\xc7\x78\x56\xc6\x7d\x17\x2e\x9d\x71\xdf\x83\x89\x53\xae\x80\xd4\x2b\x70\x49\x61\xec\xa4\x64\x46\x3e\xba\xd0\xed\xbb\xd7\xb8\x4e\x13\xc7\x1f\x55\x58\x21\x1c\xdf\x98\xc2\xe4\x3b\xa5\xf6\xa4\xbe\x91\xb8\x45\x27\xc8\x4c\x4f\x0a\xb4\x3d\x71\x6e\xc8\xb1\x0b\x2e\xda\x3b\x32\x05\x0a\x97\xce\xa4\x9a\x46\x5c\xcf\x83\xd9\x53\x67\x82\x33\x29\x07\xc6\xf3\x81\x15\xd8\x71\xea\xdc\x90\xfd\xb2\xb3\x50\x75\x36\x5d\xed\x6c\xc7\x9e\xd6\xba\xca\x6a\x5d\x3d\xab\x63\x99\x37\xf5\x75\x1c\x6a\x34\x73\x51\x55\xad\x96\x10\x8b\x46\x65\x17\x20\xfa\xec\x35\x1d\xcd\xc8\xb7\xe2\x87\x3d\x23\x5f\x8a\x1f\xe0\xfd\xd0\x38\xfb\x6b\x40\x1e\x0c\x2b\xd8\xf8\x12\x91\xd7\x85\xb3\xe5\x38\x0e\xb9\x70\xce\x49\xa0\x86\xa6\x08\xf0\xc3\x87\x17\x7d\x77\x84\x28\xea\xa3\x12\x5c\xbe\x0a\x72\x81\x04\x98\x52\x7b\x9c\xac\x0e\xe4\x42\x7d\xe3\xb2\x38\x14\x5f\x03\x72\xa9\xe6\x9d\x24\xc0\x32\xb8\xe2\xe4\xd2\x28\x76\x16\x14\x4e\x5d\x67\x1e\xc3\x3b\xb7\xaa\xc4\xc6\x78\x84\xaf\xd8\xfc\x53\xf0\x95\xf3\x6b\x0b\x4e\x5d\x0a\x9f\xda\x84\xd5\x18\x78\x7b\xa6\xe4\x3c\x38\xe1\xb2\x15\x4b\x1e\x9a\xb0\xd5\x96\x65\x60\x32\x21\xf3\x51\x51\x35\xef\xa9\x36\x59\xef\xc6\xe6\xc5\x3c\xf5\x0c\xa5\x9a\xe1\x1b\x4e\xb4\x6f\x01\x53\xe5\x94\xc2\x3b\x97\x14\xe6\x65\x87\x6b\x39\x2b\x93\xd8\xaa\xea\x52\x83\xc1\x35\x5c\x63\x04\xf2\x45\xac\x35\xf8\x4e\xb5\xd1\xff\x82\xc2\xad\xeb\xcc\xc8\x17\xad\xb8\x33\xba\xe7\x77\xaf\xac\xef\x14\x3d\x7b\x5e\xb9\xce\x75\x0c\x6f\x5b\xc9\xed\x7b\x49\xe2\x47\x28\x94\xbf\x71\xef\xef\xfa\x27\x36\xe2\xba\xb3\x89\xb6\x1d\xe0\x7d\xf7\x33\xba\xff\x97\x35\xb9\xf6\xb5\x02\xe1\x88\xdc\xab\xef\xa0\x75\x93\xab\xb4\xec\x8d\x46\x21\x33\xf2\xd6\x85\x18\x33\x96\x3f\xe1\xdb\x26\x54\xc8\x87\xf6\xbb\x03\x46\x66\xe4\xc0\x48\x61\x7d\xf6\x75\x41\xe1\x8b\xdb\x7e\x
31\x32\x0b\x61\x73\x5b\x7f\xa3\x68\x06\x4f\x06\xb8\x7f\x2f\xd6\xb7\x1b\xf2\x2d\x3d\x2e\xbc\x37\x5a\x5f\xf7\xc9\xa0\xec\x5e\x55\xff\xdc\xaa\x0a\xaf\x8d\x7f\xf2\xff\x97\x4c\xf9\x14\xde\xbb\xed\xf7\xd8\x83\x3f\xe2\xd1\x9c\xbc\x77\x21\xfe\x3f\xff\x67\x08\xbb\x44\xa8\x6d\x18\x3e\x8c\x47\xbb\x44\x82\xa0\xb6\xa4\xb6\x12\x7f\xbe\xae\xd1\x31\xbd\xd7\x42\x28\xaa\xa5\xe1\xdb\x9a\x6f\xed\x92\x19\xf9\xea\x42\xdc\x1b\x73\x22\x29\x1c\xa1\xf6\x48\x83\x55\x9c\xb6\xf7\xff\x4d\xf5\x6f\x0d\x30\xd7\x86\xde\x44\x91\xde\xbd\xe7\x05\xec\x0e\xf9\x16\x36\x92\xe9\xfd\x20\xd7\xdd\x52\x10\x28\xee\xe9\xe2\xce\x53\x67\x46\x5e\x08\x18\x60\x9e\xa7\xd4\x91\x0c\x98\x7a\x75\x12\xc0\xa9\x0b\x59\x4a\xc1\x5b\x33\xb5\x73\x09\x44\x61\xd8\xd6\xeb\xb8\x49\x96\x5e\x12\x7d\x85\x2a\x16\x94\xc2\x9f\xdf\x73\xaf\x5f\xb5\x2b\x49\x7a\xcf\xd8\x29\x61\xda\xbe\x2b\xda\x3a\xe2\xf3\xa7\x23\x63\x1e\xa1\x8d\xa1\x21\x58\xed\x3b\xae\xdf\x6b\x17\x29\x4b\x74\x9a\x92\xaf\x68\x7e\x7a\x4e\x2c\x73\xe7\x92\x5a\x30\x23\x5e\x0a\xdf\x32\x20\xc2\x11\xb4\xef\x5e\xeb\x0b\xb0\xd2\xbf\x88\xa5\x44\x60\xc2\x4d\xf5\x3a\xf7\x4b\xc2\x97\xec\x54\xbf\xd4\xa9\x45\x30\x3a\xcf\xa9\xab\xde\x9f\x98\xf7\xc9\x78\xcc\x63\x99\xbf\x3d\x36\xf7\x7b\xbe\xa5\x73\x8b\xbf\x72\x75\xbe\xf1\x53\xbc\xac\xf2\x4d\x3a\x1f\xf4\x4f\x4d\xc1\x62\x93\x49\x14\xea\xf0\x54\x8f\xae\xd2\x04\x53\x45\xbe\xae\x69\x0c\xdf\x06\x64\x4e\xc2\x14\x57\xe4\x56\x6b\x75\xdd\xd6\xf5\x66\xc7\x8a\x93\x38\x06\x86\x9c\xd9\x09\x4c\x6d\x01\xa9\x1d\xf7\x53\x60\xa7\x36\xa6\xa7\x77\x7d\x1b\xf3\xba\xb8\x07\x36\x66\xad\xdf\x53\x92\xf0\x82\x42\xda\x0a\x1e\x41\x48\x10\x82\x37\x98\xf6\x8d\xa3\x10\xa5\x4e\xa3\xf0\x20\x45\x38\x26\x74\x01\x7e\x6a\xdc\x7d\x66\x16\x3c\x19\x6c\xff\xce\x1f\xe3\xa2\xf8\x16\x60\x38\x04\xfc\x71\x69\xc1\xd6\x13\xf3\x3c\xb6\x34\x66\x57\x5b\x62\xe1\x41\xf9\x4e\x61\xba\xfe\x70\x3d\x5d\xf1\x82\xc3\x97\x15\x87\xb6\x5f\xbf\x9e\x2e\xd4\xb0\x9f\xf6\xee\x63\xf7\xfa\x9f\xc8\x99\x67\x96\xaf\x9b\xb6\x50\x3f\xc6\x48\x4c\x47\x43\x7b\x50\x04\x1b\xbe\xcb\x3a\x77\x6b\x58\x1b\xe6\xe3\x67\x1b\xb2\x36\x
d2\x67\x83\xfc\x55\x31\xd8\xe1\x66\xf1\xae\x18\xf0\xf0\xf1\x30\x7f\x57\xd0\x86\xe1\xef\xc5\xbb\x82\x3e\x6c\x0e\x37\xf3\x77\x05\x8d\xd8\xdc\xde\xca\xdf\x15\x74\x62\xf3\x69\xf1\xae\x12\x66\x61\xb0\xbd\x21\xeb\x2b\xb3\xb5\xb5\xbd\x81\x76\x12\xe3\xf4\x6e\x2e\xe5\xb2\xb5\x8e\xa7\x56\x6f\x63\x46\xba\x78\x56\xe8\x86\x62\x7d\x27\x69\x2b\x99\xad\x2f\xed\x56\x1d\x02\xf4\x6e\x6c\x3e\xb3\x37\x7f\xaf\xc3\x42\x9d\xe6\x6e\x0d\xea\x44\x77\x58\x0f\x8c\x31\xc8\x97\xb6\x1e\xe7\xa2\x1e\xd3\x62\x50\x8f\x5f\x31\x5c\x5d\xc4\x95\x15\x44\xfa\x7a\x91\xde\x79\x87\x30\x23\x13\xbd\x3a\x90\x39\x09\x33\x86\x98\xc3\xcd\x3f\x9c\xec\xd7\xaf\x63\xf4\x1b\x2b\x2f\x45\x7f\xb2\xaf\xb6\x04\x36\xb1\x05\xb8\x5b\x76\xbc\x30\x4c\x5c\xc8\x48\xb6\x31\xa4\x20\x1d\xd9\x43\x86\x75\x96\xb6\xdf\x46\xcd\xc8\xd8\x7c\x0f\xcf\xa7\x30\x57\x45\xf3\x54\xdb\x16\xb9\xac\x59\x0b\xc1\x26\x0b\x0a\x37\xa6\xd2\x3c\x6d\xa8\x44\x12\xdc\xed\x4d\xfa\x68\xeb\x97\x12\xf9\xae\x5b\xce\x14\x70\x47\x3c\x6f\x3b\xf3\xd2\x09\x90\x34\x7b\x8c\x48\xba\x31\x5c\xf1\x36\xb8\x4c\x41\x57\x20\xd2\xb9\x49\xd5\x43\xc8\xc8\xd6\x6f\xb2\xb7\x49\x1b\x42\x50\x17\xb5\xe7\xaa\x6a\x21\x35\x2e\x03\x06\xef\xcd\xc8\x34\x45\x0c\x5f\x83\x10\x53\x32\x2c\x4a\x9e\xd4\x4a\x36\x8b\x92\xa7\xb5\x92\xad\xa2\xe4\xf7\x5a\xc9\x76\x51\xf2\xac\x56\xf2\xb8\x28\x29\x01\xcb\x14\x3d\x51\x45\x35\x08\x43\xf2\xbd\xb3\x86\x7c\x1f\xa3\x71\xd6\x1f\x83\x51\x6c\x0f\xfe\x38\x46\xa6\x6d\x24\x90\x67\x3b\x5a\xd3\xaa\x7e\x66\xe7\x64\x47\x2d\x41\x09\xa6\x3a\xaa\xc7\x1a\xd6\xe5\x28\x05\xcc\x76\x69\x80\xec\xb3\x9b\x3f\x7d\x28\x99\xed\xd7\xe9\x3a\x37\x1f\x45\x86\x7e\x8b\x37\x9e\xf0\xed\xdf\x04\x86\x81\x92\x1b\x4a\xd4\x3b\x6b\xfd\xe8\x0d\x79\xad\x3e\xfa\xa5\xf8\xd4\x8f\xe2\x49\x14\x03\x79\x51\x7e\xfe\xaa\x0d\xdf\x23\x59\x35\xf4\x8a\xa4\x8a\xfb\x3b\xd6\xed\xd5\x92\x9c\xe9\xc7\x9e\xa4\x8f\x9e\xf0\x6d\x04\xf2\x97\x69\xeb\x75\x72\x8a\x14\x5a\x42\xa6\x78\x23\xd5\x32\x08\x09\x47\x4e\x43\x3d\xf4\xd4\xe4\x32\x8a\xd6\x0b\x57\x69\x61\xf4\x7b\x41\x32\x28\xaf\xf8\xc4\x73\x5e\x56\x96\x15\xbf\x50\x09\xa6\x15\xa7\x
74\xc4\xd1\x9d\xe2\x7c\xcd\x9e\xce\xc9\xcb\x14\xaf\xc2\xae\x53\xb4\x0c\x3d\x4e\xb1\x98\xc2\x00\xad\xad\x9b\x5a\xd6\xb3\x42\x15\x87\x22\x27\x20\xdb\x76\xbd\x73\xd3\x2b\x94\x9b\x81\x3f\x71\x33\xcc\x93\x30\x55\x06\x85\x02\x6b\xeb\xaf\x77\x33\xa8\x34\xdf\xbc\x6f\xf3\xc1\x52\xb3\x61\xd9\xec\x3c\x85\xe1\x50\x87\x38\x58\x46\x1f\x58\xb4\x59\x29\x19\x2c\x95\x0c\x2a\x25\xb5\xee\x2a\x25\x5b\x4b\x25\x5b\x95\x92\xed\xa5\x92\xed\x4a\xc9\xe3\xa5\x92\xc7\x95\x92\x27\x4b\x25\x4f\x2a\x25\x4f\x97\x4a\x9e\x56\x4a\x7e\x5f\x2a\xf9\xbd\x52\xf2\x6c\xa9\xe4\x99\x2e\xa9\x07\xe1\xc7\x09\x19\x3f\xc8\x05\x85\xfd\x76\x78\x47\xa6\x3a\x62\x25\xbf\xf7\x93\x5d\xd9\x02\xd8\xb5\x9d\x97\x28\xbc\x7e\xa3\x09\x58\x0c\x4c\xa0\x82\xf7\x56\x3d\xee\x68\x3d\xcb\x47\x14\x92\x76\xd3\xb5\xd1\x1c\x8f\xed\x43\x34\x3f\x60\x27\xea\x21\xa6\x30\xb5\xe7\x64\x3f\x05\xf4\x06\x46\xeb\x3b\x92\xa6\xf0\x74\x93\x3f\x46\x08\xe7\x14\x52\x55\x11\x59\x7f\xcb\x82\x8c\x11\x8c\x23\x45\xa9\xe2\xbf\x0f\x39\xf9\x98\x2e\xb7\xa0\x8a\x1f\xdf\x91\x8a\x1b\xd7\xa5\xaa\x97\x3d\xfb\x01\xd2\xf3\xd3\x56\x0c\x84\x88\xa3\x17\x2b\xe4\xb9\xa0\xf0\x2e\xbd\xc3\xd5\xc7\x98\xa6\x56\xa2\xc7\x7c\x4a\xdb\x15\x30\xbe\x4e\x7c\x71\x82\x8e\x30\x4a\x96\x70\x50\x50\xf0\x1c\x94\x1f\x12\x67\x37\x23\x3e\x23\x59\x4a\x98\x36\x91\x28\x7e\x7b\xe5\x35\x80\x59\xb9\x2c\x5f\x39\x5e\xac\x9c\x89\xf7\x5a\x5f\xa6\x19\x39\x4d\xb5\x47\x46\x75\xad\x3c\xbd\x40\x7a\xb5\x8b\x35\x62\x95\x35\xba\x8e\x1d\xd6\x1a\x17\xb7\xba\x8f\x99\x1a\x09\x57\x5d\x4b\xd5\x25\x03\xf7\x5a\xc9\xa5\x07\x8a\xbb\x39\xb2\x3d\x70\x4f\x6d\x0c\xb7\x7c\xb6\x36\x1e\xee\x85\xcd\xc1\xbd\x54\x5d\x24\xaa\xf5\x99\x09\x09\x2a\x1c\x0c\x2d\x11\xa6\xaf\x7f\x64\x2c\xc2\xa8\x12\x41\x82\xf0\xf1\x60\x90\xc7\x7d\x4d\x0f\xf9\x05\xbf\xb1\x20\x30\xe1\x21\xa6\x2c\xca\x78\x19\xc5\xa2\x1a\xc9\xf5\x4c\xdf\xf9\x9e\x0a\xa7\x1a\x01\xf6\xae\xa0\xb7\x79\x52\xbc\xbf\x14\xf0\x36\xcf\xb0\xf7\xf7\xa3\xdd\xb6\x44\x9e\x3d\x34\x86\xde\x87\xa9\xd3\x1e\xdd\x03\xc3\x81\x9c\x9a\x70\x20\x4a\x66\xce\x57\xa3\x90\xaa\x8b\x17\xa5\xf8\x9d\xbf\xaa\x85\x0f\x
59\x09\x16\x52\x4a\xff\x27\x09\xf9\x2c\x70\xa4\xd7\x65\x38\x0f\x38\x69\xa4\x61\x2d\xba\x05\x0b\xc4\xf7\x6a\xca\xcd\xfd\x04\x24\x94\x76\xf3\x87\xa9\xd6\xf8\xde\xa6\x0e\xe1\x89\x13\x84\xf5\x18\xd5\x31\xd9\x89\x09\x4f\xc8\x2b\x26\x79\x3f\x4e\x66\x44\xdf\xdf\x52\x78\x95\x56\x35\xe2\x26\xcc\xfb\x4b\x35\xdb\x44\x68\x8d\xf8\xdb\xf4\xae\xc8\x96\xb9\x8b\x90\x89\x98\x33\x32\xc1\x8b\x46\x26\xf8\xd2\xc8\x38\x71\x8d\x66\x92\x4c\x89\xf1\xb9\xe9\x33\xe0\x68\xe1\xc7\x30\xad\x0c\xfa\xba\x30\xe3\xf3\x92\x99\xbf\xdc\xfc\x95\xe6\xaf\x41\x1e\x2f\x84\x12\xef\x6f\xc8\x99\x3a\xbb\x96\x05\x0f\x86\x06\xd2\x95\xd8\xfe\x26\x75\xd4\x71\xb3\x2c\x75\xde\xd4\xbf\xa7\xb6\xce\xd4\x6e\x10\xdd\xb5\xfd\x42\xa8\x63\x87\x2f\x17\x70\xd0\xa4\xde\x39\x57\x08\x25\xa6\xe0\xa3\x72\x55\x87\xb5\x41\x27\x34\x9d\x02\xa1\xbc\xac\x7c\x8e\xb7\x9f\xe5\x15\x89\xe2\x3c\x15\x56\xc4\x3b\x0b\xa3\x9b\x55\x08\xd0\x7a\xc9\xe2\x7f\xc8\x8e\xcb\x3b\x61\x8c\x89\x8f\x26\x2c\x95\x96\x46\x88\xa8\x4d\x6e\x46\x87\x78\xb1\xec\x03\x1a\xca\x1d\x6b\x5b\x71\x9d\x5c\xc7\x3d\x40\x73\x50\x76\x9a\x7f\x78\x37\x23\x01\x79\x9b\xc2\x5a\x9f\xa4\x7d\xf2\x26\x05\xb5\x38\x42\xad\x0d\x86\xe9\xce\x96\x71\x11\xc7\xcb\xee\x4b\x9c\x7c\x97\x11\xd9\x77\xcf\xf0\x81\xe3\x83\xc2\xb2\xf8\x30\x23\x07\x29\xa8\x47\x10\xea\x37\x42\xde\x97\x56\x22\x70\x8c\xbf\x90\x33\x57\xac\xdb\x8b\x76\xa2\x2a\x5b\x25\xa5\x39\x79\xa1\x58\x8d\xe1\xe6\x6f\x02\x78\xcd\xe4\xc1\x65\x0a\xbd\x33\x67\xb8\xf9\x9b\x1a\xdf\x56\x6f\x48\x37\x94\x78\x96\xf5\xd9\x84\xaa\x1f\x02\x3c\x25\x35\x6a\x9d\xf9\x26\x30\xaa\xc4\x47\xe6\xa0\x6c\xc8\x60\xb8\xa9\x44\xaf\x52\x1e\x60\x46\x1e\x50\xc4\x60\x63\x46\xbe\xa8\x99\xb2\xaf\x5a\x24\x40\x02\x51\xe3\x9c\xf8\xc6\xd3\xdf\x44\x1d\x63\xf1\x0d\x44\xcc\x3f\x9a\xa9\xfb\x4a\x68\xb9\x15\xbe\x53\xeb\xbc\x38\xdd\x28\x84\xa4\x6d\x3b\x76\x86\x8f\x41\x38\x18\x85\xd6\x84\x82\x69\x88\x17\xb2\x95\xd7\x43\x79\xa2\xbd\xde\x66\x5e\x0f\xc5\x8f\x35\x15\x87\x4b\x4c\xa7\x04\xdc\x8a\xad\x9c\xfb\x54\xf3\x51\x00\x71\x66\x1e\x57\xd8\xca\xb2\xcd\x70\x7d\x9b\x81\x1d\x3b\x9b\x6a\x7e\x9b\x
eb\x46\x63\x2a\xad\x5b\x02\x5c\x81\x21\x08\xe7\x69\x73\xa5\x32\x86\xca\x9a\x5a\x48\x56\xd7\x48\x60\x3f\x52\x05\x4a\xb1\x4e\xd3\xbf\xa0\xf0\xfe\x8e\xba\x5b\x95\xba\x5f\xdb\x4f\xc0\x9c\xec\xa5\x79\x0c\xb2\x42\xfe\xe1\x20\xe9\x48\xda\xd8\x51\x8c\xcb\x88\xae\x8f\xdf\x1a\x50\x57\xcf\x41\x21\x00\xbf\xf4\xbc\x75\xb0\x71\xd4\xdc\xf2\x47\x63\xcb\xea\xd0\x45\xad\x65\x96\x1b\x81\x34\x28\x7e\xf2\x00\xb5\x7c\xc9\x78\xe3\xa0\xc5\x8f\x45\x27\x28\x58\x54\x7c\x8d\xe3\xd1\xb9\x3a\xc3\x3b\xc0\xd5\x9f\x6b\x4a\xed\x73\x82\x8f\x3b\x14\xf0\xc5\x42\x0b\x94\x93\x00\x34\x86\xc7\x03\xff\xbc\xdd\xa3\x63\x9f\x64\xf0\x93\x4d\x14\x3b\x37\x61\xda\x6b\xb1\x26\xd1\x54\x6a\xe0\x4e\x6c\xaa\x89\xeb\x55\x18\x42\x6f\xa8\x7e\x95\xef\xa5\x26\xa1\x35\x88\xd7\x5d\x88\xd2\x7c\xa1\x26\x00\xe5\xe5\x3b\x72\x41\xab\xde\x82\xbc\xc9\x22\xb9\xe2\xff\x7b\x81\x31\xbf\x0b\xbb\xe9\xe7\x25\xa2\x20\x42\x09\xf2\x58\x8e\xd9\x59\x18\x51\x2f\xf1\x01\x07\x3b\xc4\x71\xc7\x8e\x62\xed\x91\x65\xef\x21\xe3\x2f\x36\xb4\xdf\x26\x6b\x65\xf2\xfb\xdd\x91\xd0\x8e\x17\x1c\x2f\x42\xb5\x36\xc1\x73\x56\xb2\x29\x65\x7d\x26\x6a\xc1\xdf\x70\xeb\x6e\xd5\x96\x5c\x55\x72\x6c\x83\xa2\xad\x95\x82\xc2\xbb\x8b\xe5\x6e\xfa\x45\x9e\xa1\xa2\x1f\x9d\x59\x7c\x47\xe6\x93\x5f\x22\xb8\x79\x31\x72\xe8\x74\x61\x1c\xe5\x5b\x7b\xd0\xf9\xbd\x2b\x86\xef\xfb\x21\xe8\xb5\x13\x7a\x71\x2e\x10\x2e\xe8\x08\x1b\x69\x09\x4b\x20\xe0\x2d\x7d\x86\x50\x48\x30\x20\x97\xe7\x78\xe5\x85\xb1\xde\xd9\x2b\xdb\xd3\x52\xa0\x92\x00\x10\x4c\x95\xd8\x97\x28\xb1\x4f\xbd\x4b\x00\x01\x78\x51\x48\xbf\xda\x29\x5d\xad\x4d\x80\x2e\x45\x2e\xc6\x00\x68\x97\xa8\xfe\x18\x8c\x06\x36\x92\x32\x63\xcb\x9e\x38\x73\x22\x20\x84\xa0\xed\xf6\x2c\x1e\xcd\x48\x1c\xc1\x8c\xa4\xf0\x64\x00\x18\x93\xdb\x9e\x91\x6f\x29\xbe\xd9\xdc\xd6\x6f\x16\x25\x73\xa9\xe7\x71\x6d\x27\x7d\x57\x8d\x3a\xc1\x38\xf9\x45\xae\x29\x1c\x5e\xa0\xbf\x59\x19\xf6\x8a\xd7\x4c\x7e\xfa\xe5\xb2\x57\x61\x6d\xb1\x15\x3b\x8c\x7c\x41\x11\x39\x63\x46\xe2\xd1\xfb\xd4\xfe\x9c\x82\xab\x01\x6e\x65\x5c\x81\x1e\x57\x50\xc4\xe9\x8a\xda\x75\x41\x75\xce\x74\x
86\x1f\xc8\x39\x50\xda\xc4\x5d\x9e\x99\x28\x88\x3c\x72\xde\x08\xc8\x22\xe7\x40\x00\x8b\x5a\x2f\xbc\x8e\x6c\xd1\x67\x47\x68\x4c\xdf\x77\x29\x78\xb6\xc0\xb8\x62\x42\x87\x24\x03\xf7\xad\x2d\x74\x5c\x31\xd1\x67\x6a\xb4\x5e\xe4\xbc\x15\x90\x44\xce\x2b\x01\x61\xd4\x74\xe8\xe7\x24\x89\x60\x38\x18\xe2\x06\xff\xfa\x85\x3f\x9f\x3c\xd3\x7a\xb9\x5c\x4a\xc6\xd8\x58\x58\xb2\xbd\x85\xb1\x63\x4d\xbd\xed\xc7\xf8\x6b\x24\x37\x86\xb6\xc4\x6b\x3e\x2f\xd2\xb1\x65\x2b\xda\x34\x41\x47\x3d\x51\xae\x79\xbc\xa0\x10\x44\xed\x1a\x4e\xd5\xed\x13\xfc\xfc\x68\x46\x42\x05\x48\x5e\xa4\xfd\x57\x0c\x28\x85\x51\x0e\x8b\x6e\xb4\xd6\x4c\x92\xa3\xbf\x07\xd7\xb6\x1a\x4d\x31\xea\xd1\x85\xed\x6b\x02\x26\xdc\xec\x73\x51\x45\x75\x6a\xe8\xcc\xc4\xb0\xd2\x69\x1e\x24\xfd\xa3\x68\x10\x57\xe3\x58\x69\x97\x08\x16\x15\x71\x4d\xd3\x68\x7d\xfc\xc8\x6a\xea\x0b\x53\x88\x17\x8d\x51\xab\xe8\xa3\xe3\xf1\x2b\xf1\x25\x71\x66\x24\x88\xc0\xd3\xe9\x31\xc3\x80\x24\xa5\xf5\x49\x3e\xab\x1b\x92\x46\xc0\x10\x10\xfa\xec\xa8\x47\x92\x0d\x86\xd1\xe4\x81\xf5\x3d\x9a\x27\xb3\xe8\xbb\x90\x34\x47\x11\x43\xdc\xab\x6b\x79\x95\x3a\x53\xe2\xaa\xa5\xd7\xa1\x49\xd4\xa8\x9e\x17\x5e\x58\xcd\xeb\xca\xd4\x32\x3d\xcf\xcc\x46\x24\x21\xc1\xb8\x61\xf8\x61\x2d\x85\x3d\xaf\x84\x71\xe1\x18\x2f\xa1\xde\xba\x58\xe4\x0c\xc5\x33\xbd\xcc\x09\x68\xf7\xf2\x56\x73\x81\x0e\x77\x7e\xb2\x0f\x78\xca\x28\xb0\x33\x3b\x06\xb6\xa9\xa4\x1f\x26\x29\xb8\xae\x79\xef\x06\xea\x15\x97\x14\xbc\x37\xb6\x00\x77\xa2\xdf\x2f\xea\x99\x20\x34\x48\xfe\x5e\xc4\x60\xab\x18\xa2\xe2\xb9\x10\x25\x3f\xaf\xcf\xd2\xe6\x40\x27\xb5\xa3\x23\x5c\x31\xde\xf7\xde\x00\xef\xbb\x2e\x86\x63\xca\x22\x6d\x4f\x47\x21\x56\x93\x53\xf5\x51\x45\xba\x5a\x7f\x82\x9c\x2c\x8f\xe0\xf7\x7a\x8b\x67\xbf\x37\x35\x60\x1f\xf2\x06\x9b\x95\x06\x01\x89\xca\x1a\x67\xaa\xdf\x40\x3d\x6d\xc2\x39\x11\x30\xa8\x06\x5b\x69\xad\x89\x7d\x0e\x07\xc5\x02\xa8\x36\xda\x1c\x6a\x41\xe1\x8d\xd0\x96\x18\x67\xa0\x44\xb6\xc8\x59\x0a\x8a\x17\x73\x55\xe1\xef\x44\x78\x8b\x4c\x84\x37\x3f\x55\xcc\xc3\x41\x43\x20\x29\xf3\xcf\xb7\x10\x06\xd0\x90\x1a\x
a5\x12\x97\xdc\x0b\x9b\x0c\x7b\x5f\x66\x24\xde\x40\x34\x82\x3d\xf9\x11\x74\x03\x8a\xb1\xca\xaf\xf0\xdb\x79\xe0\x2b\x13\xad\x69\x1a\x15\xd1\x9a\x0e\x04\x85\x77\x29\xe9\x4a\x62\x7d\x15\x49\x7c\xd1\xf1\x33\x81\x46\x0d\x1d\x9d\xac\x1c\x43\x88\x77\xa3\xb5\x16\xdf\x8c\xdb\x93\x04\xdc\x33\x3b\x46\x19\x78\x1c\x99\x08\x48\x97\x77\x36\x23\x18\x3b\x15\x05\x61\x75\x62\x74\x2e\x5d\x7b\x1c\x61\x3f\x93\x5a\x73\xd9\xe8\x72\x6c\x78\x68\x46\x98\x83\xfb\xc9\xd5\xb3\xec\xb3\x53\x14\xb7\xb3\x55\x8e\x6b\x4e\x64\x04\xa7\x29\x8a\x02\x35\xce\x4b\xf6\xd3\xbe\x7b\xf6\x3c\x76\x32\x54\x44\x2c\x05\xd8\xd5\x65\x68\xec\x49\x2a\x74\x55\xc2\xcf\xd4\x9e\x91\x6e\x04\x19\xc8\x7e\x8a\xaa\x4e\xfc\xa9\xa8\xa4\x7b\x50\xe1\x93\x4d\x66\x1a\xb4\x28\xc7\x21\xba\x07\x38\x44\x1c\x30\xfb\xff\x62\xa8\xec\xb4\x1c\x2a\x3b\xad\xf0\xe3\x6d\x43\xf5\x9c\x69\xa4\x87\x4a\x32\x3d\xb8\x34\x05\x0f\x4d\x9e\xd4\x57\x71\xcd\xed\x8f\x29\x2a\xa1\x9a\xbe\xcb\x96\xbf\x9b\x2d\x7f\x77\xcb\xae\x37\xb9\x8c\x60\x1a\x55\xda\x5c\x46\x70\x90\x96\xc3\xc1\xd6\x7a\xc1\x2f\x23\xf0\x6b\x2b\xbe\xbd\xd4\x1d\x3b\xd1\xdf\x8c\x35\xfb\x72\x52\x61\x23\x57\xaa\x5d\x46\xd0\xad\xd5\x7a\xb2\x5c\xeb\x78\xa9\xb3\xe3\xa2\xda\xd3\xd5\x6a\x45\x67\x65\xad\xdf\x6d\xed\x2a\xbb\xb2\xdf\xe7\x44\xf6\xa7\x4a\x6c\xc3\x3f\xd7\xad\xea\xb3\x3d\x5c\x03\x9c\x3f\xae\x4e\x29\x25\xa0\x9c\xb0\xc4\xe6\xef\x91\x7c\x75\xb4\x06\x5e\x68\xef\x33\xbd\x94\xfa\xb7\x29\x89\x8b\x92\x69\x04\x0d\x2a\x7e\x01\x46\x03\xbb\x0a\x55\x9e\x93\xf4\x3d\xb5\x47\x1c\x3d\x7f\xdd\x03\x5b\x38\x49\x9f\x69\x55\x7f\x71\x8b\x94\x38\xc4\xab\xc2\x93\x86\x1a\x35\x61\x25\x2a\x79\xea\xf4\x7a\xe6\x4a\x22\x3f\xc0\xcb\x1f\x2a\xee\x21\x3c\x5c\x21\xef\x86\x6a\x1b\xa8\x15\xad\x37\xd6\xd6\x1f\x47\xeb\x8a\x68\x9d\x63\x53\xd5\x85\x44\xab\xc5\x66\xe4\x43\x0a\xe8\x8a\x22\xfa\xec\x71\x11\xa2\xb8\x66\x43\x3d\xb0\x6f\x10\x61\x75\x29\xb0\xc7\xf6\x1c\x9f\x1f\x53\x70\x3f\xda\x71\x46\xac\x97\x49\x16\xf9\x9d\x38\x91\x9d\x34\x73\xc7\xa1\x44\xdd\xa5\xc2\xa9\x90\x27\x36\xee\x84\x29\x96\xcf\xb9\xec\x60\x88\xff\xbe\x95\xbb\x05\x65\xd5\x
58\xc8\xb9\x71\xfa\x47\x3b\x4c\x2a\x76\xcc\xb9\x85\xfa\xe7\x0c\xde\x20\xcf\x16\xa4\x3a\x1c\x22\xbc\xc2\x13\x78\x62\xcc\xb9\x7f\xa0\x39\x37\xbe\xa8\xb8\x29\x3c\x33\x87\x7c\xeb\x81\xa3\x76\x45\xa1\x90\x7e\x77\xb4\x9b\xd8\xa5\xb9\xbe\xdc\xa8\x26\x68\xb6\x36\xbc\xca\xa9\x2e\xc7\xe4\x29\x39\xb8\x38\x25\xac\x6a\x06\xac\xbd\x13\x12\x33\xce\x39\x39\x74\x8d\xf3\x89\x7b\xad\x58\xa6\x63\x8a\x03\xbf\x4d\x1b\x1c\x21\x1a\x32\x6e\x22\x29\x2d\x73\x6e\x62\xb6\xfa\xc7\xf6\x0d\xd9\x4d\x4d\x14\x0f\x0f\xa3\xba\x0a\x04\x0b\x6f\xd7\xc6\x39\xe8\x54\x9c\x8d\x8e\x86\x02\xe6\xe4\x44\x2d\x19\xce\x5e\x8f\xf1\x3d\xae\xe5\x8a\x9b\xc5\xd6\x03\x23\x68\x8f\x54\xb3\xdd\x44\x49\xb5\x0d\x23\x44\xb6\x80\x9b\xdc\xff\x8a\x27\xd4\xd6\x8a\x2b\x63\x9e\x91\x4f\x29\xf2\xe9\xb5\x91\x8a\xbe\xfb\x71\x01\xcb\x16\xea\x27\x2e\xad\x18\xac\x68\xce\xb9\x01\x3c\xbd\x55\xf0\xdc\xd3\x60\xc8\x62\x04\x32\x3f\x4c\x27\x11\x9b\x77\x58\x10\xe8\xa8\x48\x98\x48\x3c\x5d\x0b\x8c\xd0\x02\xe2\x05\x90\x7a\xab\x1e\x0e\x7b\x1a\x48\x4b\xf0\xfc\x92\x83\x27\xba\xd7\x4c\x51\x0f\x7e\x4d\x57\x43\x69\x54\x86\xbd\xc6\x87\x54\xd5\x78\xb5\xea\xd7\xb5\xd2\x87\xda\x13\xb5\xd0\x13\x8d\xa4\x8b\x93\x19\x25\x0d\xae\x1a\x4a\x0a\x11\x65\xc5\xfe\x74\xc5\xd3\x48\x75\xe5\xe2\x9a\x3f\x86\x64\xc9\x77\x6f\x38\xb4\xb3\xd2\x65\x46\x2f\x55\x82\xd1\xc9\x57\x7c\x5c\xf0\xbe\x46\xc1\xc2\xd2\xfa\xbc\xd0\xeb\x93\x34\xb9\xdc\xe5\xee\x0b\xc5\x7c\xd1\x66\x2b\x5a\x1f\xa4\xec\xd5\xeb\xfd\xd7\xc7\xaf\x97\xe3\x94\xcd\xa3\x8a\x9b\x81\xbe\xbb\x33\x3e\x06\x37\xd1\x5f\xbb\x49\xeb\xbb\x7e\xcb\x65\xda\x2c\x02\x01\xf3\x48\x5f\x69\x5c\xff\x95\x6e\x1b\xc2\x5a\xad\xdc\xd2\x9d\x24\x04\x2f\xea\xd0\xcb\xbb\x75\x05\x2e\x74\x90\x30\xf7\x50\x7b\x57\x1c\x45\x8d\x06\xc8\x47\x4c\xe7\x5c\xc8\xdd\x64\xaf\xec\xaf\x82\xc4\x14\xd8\xdc\x8e\xc1\x3d\xd1\x17\xc3\xc7\x51\xab\xf9\xe1\x6a\x7a\x72\x2f\xcb\x2f\xea\x6b\x10\x19\x25\x35\x35\x69\x98\xd4\xb7\x39\xce\xb4\xfa\x64\x41\xe1\x75\xe4\x1c\x92\x3f\x87\xb0\x09\x83\xef\x14\xce\xda\x34\x02\x0d\xb9\xfb\x6b\xc0\x36\xb7\x67\xe4\x38\x6a\xd6\x16\xef\x71\x38\x8a\x
d4\x74\x5f\x47\x74\x01\x71\xcd\x45\x77\xe5\x3c\x76\x6d\xe3\x7f\xa7\x61\xdb\x7d\x8f\x58\x6f\xae\x8f\xf9\x9c\x5c\x23\x1b\x07\x3b\xab\x53\xcf\x49\xc4\x7b\x45\xdc\x0d\x77\xde\x77\x7d\xba\xea\x7a\xab\x59\x4d\x6f\x99\x74\xe4\x8d\x2b\x5f\xd3\x94\x6f\x4e\x6e\x22\xb4\x12\xe8\x4a\x72\xac\xa4\x19\x6f\x54\xd2\x2c\x5e\xa5\x59\x16\xb5\x77\x93\x92\xe4\x6d\x17\x87\x7d\xe9\xb4\x86\xc8\xa9\x30\x64\x5c\x3c\xed\xdd\x76\xcd\x20\xe9\x7b\x14\xd5\x13\xdc\xad\x75\x0a\xe8\xe8\x16\xe6\x8e\x6e\x9e\xc3\x56\x1c\xdd\x12\x0a\xde\x77\x4a\xed\x06\x24\x10\x2e\x30\xd6\x66\xf3\x51\xaf\x3a\xe5\xa1\x7b\x77\xb4\xc6\xe0\xaa\x7d\xf7\x57\x50\xe8\xf0\x4e\x14\xba\xbd\xba\x6b\xe8\x65\xbd\x8a\x63\xab\xb4\xb0\x82\x6d\xdd\x0f\x36\x9a\x9e\xe8\x36\x76\x95\x1c\xa8\x32\x2f\x23\xdc\x10\xc5\x1c\xfb\x1d\x33\x4d\x14\xa4\x22\x0a\x3c\x27\x0a\x28\x8a\x57\x95\xfb\x59\x83\x1f\x9d\x1a\xdd\x10\xc1\xef\x24\x05\x09\x19\xbc\x66\x15\xf4\xb9\xae\x45\xb9\x47\xcb\xbc\x4d\xa6\x3d\xdd\xde\xae\x4d\xa9\xcc\x0e\xed\x18\xd8\x17\x5b\x82\xfb\xcd\xe6\xe0\x76\x35\xa6\x78\x65\x6c\x3d\x12\x11\x5e\x84\x31\x8b\xd6\x25\xf5\x15\xc6\xb4\xe2\x85\x68\x4f\xc1\xa1\xed\x53\x52\xf5\x99\x23\x93\x93\xbb\x55\x85\xba\x6b\x0b\x30\x99\x82\xe1\x8d\x19\x07\xf3\x7d\xc1\xd3\xb4\xdd\x3e\x45\xab\x20\xd6\x5b\xa4\x08\xce\xfc\x79\x93\x3d\x4a\xca\xa5\x8c\x6a\x66\x27\xb9\x45\x8a\x1f\xa6\xcc\x8d\xfe\xa5\x04\xcc\x67\xda\x1c\x67\xc2\xb5\xf5\x87\x20\x27\x09\x79\xa3\x64\x94\x3c\xcd\x4b\x6e\x7a\x72\x20\x74\x4e\x18\x3d\xb9\x32\x17\x4c\xca\xc9\x0b\xa1\x13\x2d\x8b\x3b\x8c\x2f\x7e\xb2\x8f\xb6\x04\xf6\xc9\xce\x80\x7d\xb6\x39\xb0\x67\x36\x03\x17\xf5\x67\xe7\x7a\x49\x0f\xcc\x92\x5e\x24\x27\x5c\xa4\x61\x12\x97\x8b\xea\x66\x61\xe4\xbf\x42\xeb\x9e\xa5\x57\x5f\x52\x2e\x2a\xaf\x04\x8b\xbd\xcb\x6a\xc2\x9b\x69\xb8\xdc\xcf\xb4\xd2\x71\xca\x71\xae\xda\xc6\xe5\x65\x94\x5b\xdf\xc8\x50\x4d\xf1\x20\x37\xaf\x89\x83\xf0\xc2\x82\x57\x62\xa9\xfd\xbb\x38\x48\x70\x51\x74\xa5\x28\x4b\x31\x8a\xdc\x99\xea\xf3\xad\xc9\x3c\x7d\x1e\xdd\xc3\x1f\x27\xe7\x0e\xb4\xd7\xfa\xa7\x00\x62\x78\x19\x2d\xd3\x65\xa1\xe8\xb2\x
b6\x06\xdf\x8b\xd6\x5d\x81\x69\x01\xe0\x3c\x82\x82\xc1\x3e\x63\xf0\x45\x31\xd8\x39\x82\x70\x8f\xec\x9f\x2e\x3a\xbe\x2c\x0c\xda\xa3\xb0\x1f\xb5\x87\xfd\x7b\xa6\x83\x1e\x3d\x59\xbd\xaa\x7c\x62\xb0\xc5\x94\xec\xba\x70\xa4\x0e\xb9\xfb\xd8\xb0\xf6\x1c\x24\xd5\x71\xa8\xea\x58\xe1\x31\x32\x65\xde\xcc\xf6\xc0\x3b\xb5\x5f\x71\xe2\xd1\x8a\x38\x33\xd4\xe4\xc9\x83\xc4\x99\x92\xb3\x08\x8e\x11\x75\x0c\x4c\xaf\x0a\x33\x68\xb2\x51\xf7\x37\x35\xdd\x7d\x56\xdd\x81\x37\xd0\xd7\x40\x1a\xe9\x1d\x70\x60\xab\x36\xa9\x79\xc3\x53\xfb\xa5\x66\x0e\x0b\x09\xe0\x8b\xf6\x51\xcd\x5d\x54\x6b\x0a\x04\x74\x65\x76\xe6\xe4\x2a\x02\x13\xaa\x3c\x59\xa8\x21\x0e\xf5\xd0\xc2\xd5\xa1\x9d\xda\xef\x39\x51\xe4\x69\x68\x87\xe5\xa8\x3e\x54\x47\x35\xb0\xc3\x06\x04\xea\x9d\xda\x3f\x38\x09\x69\x3e\x3c\xdc\xcd\xd7\x98\xfd\xf6\x5a\xcb\x71\x57\xaa\x78\x59\xbc\x0c\x9a\x3b\x7a\xc1\x49\x40\x97\xe7\xf9\x86\x93\xb7\xea\x6d\xa3\x57\x7c\xde\xee\x6c\x99\x79\xdf\x6e\xf6\x82\x8f\x4b\x36\x6c\xb8\x24\xee\xed\x25\xa4\x5a\xd6\xe2\xa8\x2f\x2b\xc1\x26\x56\x8d\x8f\xcb\xc1\x5c\x25\x8d\xf4\x73\x46\xf6\x8c\x24\x51\x10\xce\x40\xc3\xa4\xe6\x38\xdc\xc7\xea\x33\xb3\x1c\x26\x83\x26\x98\xc4\xeb\xb9\x02\x0a\x35\x19\x74\x35\x08\xea\x4e\xbc\x81\xee\xc4\x80\xa0\xbb\xba\xcf\x03\xdb\x6d\x06\xba\xc7\xb6\xab\x8e\x75\x3e\x1c\xef\xac\xa5\x83\x33\xd3\x01\x5b\xe2\xd4\x52\x0d\x6c\xa6\xad\x01\xb3\x74\xb5\xf5\xd0\x4e\x9b\xa1\x6b\xd3\x4e\x9d\x1b\x72\x51\x74\xd1\xcd\xe7\xd0\xd0\x49\xd7\x74\x52\x0c\x61\x05\x24\xdc\x15\x46\xe7\xf7\x7a\x95\xf9\x4a\x95\xba\x07\xbf\x17\xd7\xab\x6c\xd6\x79\x58\xef\xba\x52\xa5\x45\x4a\xfb\xb1\xcc\xba\x7d\x8c\xd6\x67\x65\x9c\xd9\x1c\x6e\x6d\x09\x99\x4e\xb5\x95\xc1\x8e\xa6\x38\xbb\x51\xb3\x7f\x9e\xfb\xf0\x21\xb1\x2c\xcd\x7b\x69\x6d\xa0\x9b\xe7\x49\xe8\x33\xd8\x8d\x50\x3d\x42\xed\xd3\x05\x9c\xb6\xa2\xe4\x6b\x99\xdf\x1c\x8e\x74\x10\x5d\x3b\x4f\xb4\x60\x42\x1c\xbf\x5b\xfe\xb8\x14\xf3\x4a\x53\x9f\x37\x24\x15\xa3\x0b\x8f\x19\xb6\xa1\xc8\xe0\xb4\x58\xc0\xa7\x36\x14\xae\xe3\xc9\x3a\x96\x49\x79\x27\xd5\xbc\xa4\x8e\x25\xf8\x40\xe6\x81\x04\x63\x
47\x3d\x32\x90\xce\xbb\x08\xaf\x98\xf3\xbb\xb9\x4a\x02\x45\xf1\x5c\xa2\xff\x7a\xac\xea\xc4\x4b\x97\x6f\xda\xf8\x42\xc7\x11\x86\xd3\x48\x47\x54\xa9\xe6\x26\xa0\x70\xd8\xba\x46\x2d\x91\xdf\x1f\xc4\xa5\x03\xbd\xc9\x51\xb5\x9c\x7d\x40\xf4\xb3\x3c\xa8\x23\x2d\xd7\x0c\xaf\x36\x9e\xe7\x61\xce\x78\x9f\x3d\x7c\xb8\x1c\xf8\xad\xa8\x13\x3b\x72\xb1\x20\x31\x99\x92\x8f\x11\x9c\xae\x38\x48\xbe\xd4\x34\xba\x9c\xa8\xde\xf1\x87\x0f\x75\x60\xb3\x3e\x1b\xc5\x7d\xd7\x8e\xe9\x42\xc9\x92\x19\x05\xb5\x0e\x04\xb3\x71\x1f\xd3\x7e\x77\xd4\x4d\x6c\x0c\x3c\xf0\x29\x82\x6e\x62\x82\xfa\x3e\xb4\x34\xaa\x54\xbb\xff\x04\x98\xd4\xb2\xf5\x49\x74\x2f\xa3\xec\x4b\xc5\x38\x31\x05\xbb\xb7\x36\x87\x43\x3b\x81\x23\x9b\xc1\xb1\xed\xc1\x17\x3b\x83\x5d\x0d\xcb\xb7\xd1\x5f\x48\xb4\x26\x49\x11\x07\x98\x07\x46\xd7\x8c\xdc\xf5\x1a\x75\xc7\xc7\x08\x44\x7f\x07\x44\x3f\x03\xd1\xbf\x05\xd1\x9f\x41\x7e\x9d\x84\x79\x0f\xa3\xa5\x54\xf0\x45\x22\xf8\xf6\x04\xe7\x71\x7f\x07\x77\x34\x43\x3b\xfa\x5b\x34\xa3\x9f\x3d\xaf\x8a\xce\xaf\x70\xdd\xcf\x28\x78\x7a\xab\x4a\xa6\x52\x0f\xb7\x39\x77\xe4\x8c\xdc\x46\xe8\x50\x73\x9e\xc0\x69\x06\x07\x21\xc1\xc7\x2b\x06\xf3\x90\x58\x01\x8b\x52\x6e\x69\xd5\x3a\xbc\x8d\xd6\x04\xd1\x68\xcc\x5f\x5f\x7c\xe3\x30\x51\x64\x05\x4f\xe5\xda\x0b\xe5\x19\x79\xab\x46\x43\xa4\x23\x1a\x6f\x31\x6f\x2b\xf1\x40\x71\x92\x6a\x59\xd4\xa2\x08\x88\xfb\x33\x20\x78\x7b\x7a\x46\x09\x1a\xf8\xd0\xef\x68\x68\x92\xfb\x93\xbf\xa9\xcc\xdf\x5b\x93\xc9\xae\x71\xa5\xb5\xbf\xc2\x99\xc1\x0c\x1a\x11\x60\xea\xbe\x92\x38\x5c\x90\x18\x3c\x3a\xca\x47\x56\x64\x8a\x29\xb6\xe1\xbb\x42\x82\x45\xbc\xcf\x05\x1c\xac\xd9\x8f\xc6\xfc\x2c\xae\x41\xaf\x79\xae\x45\x14\x59\x17\x4a\xca\x9b\x91\x2b\x38\x51\xdf\x7c\x21\xee\xf3\xcf\x1b\x41\xf2\xb0\xed\x14\x0e\x8a\xc8\xce\x14\x7f\xe8\x00\xce\x78\xe1\x72\x1b\xc1\x4a\x08\xe7\x1c\x52\xe6\x98\x6e\x94\xea\x36\x45\x20\x68\xaa\xe0\xac\x12\xac\xda\xfc\x2e\x23\x53\x9b\x17\x3a\x10\xb5\xf9\x61\x1c\x09\x8c\xfc\xa7\x06\xa7\x44\xb9\x30\xbe\x50\xc3\xab\xe5\x34\x4c\xfe\xd5\x9d\xd3\x27\x4d\x23\x68\x4c\xf4\xea\x24\xc4\x
33\x28\xbf\xc0\xe1\xa7\x1a\x87\xd7\xa1\xcd\x44\x7f\x15\x14\x62\xbd\xa7\x44\x22\x94\x95\x9b\xaa\xc4\xc4\x99\x92\x73\x8e\x8e\x0f\xdf\x7d\x7c\x6b\xc1\xb5\x44\xd9\x1d\x01\xfb\x4d\xb9\x32\x6a\x66\xf8\xee\x0c\xdf\x72\x3f\xd4\xf7\xd9\x27\x62\x39\x4b\x6b\x5c\x5a\x91\xc6\x90\x39\x72\xed\x29\x13\x80\xde\x44\x59\x7e\xce\xc2\x35\x0e\xfd\xf9\x15\x9a\xa0\xe0\x5e\x2b\x14\x9c\x80\x70\xc9\x38\xa0\x30\x09\xc8\x29\x35\x19\x25\xc3\x96\x81\x1b\x18\x8a\xf9\xcc\x42\x5d\x87\x80\xb0\x0a\x47\xea\x39\x77\xb6\xd0\x20\x82\x10\xda\x30\xe8\x94\x9c\x28\x70\xdf\x91\xf0\x60\xa8\xfe\xad\xfc\x1f\x33\x41\x99\x0f\x55\x3e\x5e\x05\x57\xbd\xda\xab\x03\x3c\xc3\xd3\x8e\x2f\xb5\xd0\x48\xe1\x4d\xe4\x90\x2c\x71\x50\x19\x76\x05\x9f\x39\x1c\xa8\x79\x5c\xc1\xcb\x04\x4d\x27\xd4\xe3\x55\x02\x07\xe6\xf1\x07\x87\x50\x3f\xbd\xe7\x70\xa6\x9f\x5e\x70\x78\x6b\x8a\x5f\x71\x78\x65\x1e\x75\x50\xc4\x86\x99\x69\x3e\x66\x41\xbf\xd3\x26\xbb\x09\x44\x89\x4d\xec\x14\x9a\xc6\x64\x28\x6d\x1e\x44\xce\x0f\x01\x1f\x22\x13\x78\xf1\x8b\x36\x5e\xd8\x5a\xc0\x0b\xfd\xb4\xb9\x80\x1f\xfa\xe9\xc9\x02\x3e\x47\xf7\xc9\x41\x75\xd5\x62\xa3\x3c\x06\xee\xbc\x64\x3a\x28\xed\x8c\x7c\x8e\xb4\x8d\x92\x9b\xe8\xd4\xbb\xd6\x44\x24\x7e\x86\x8d\x2c\x08\x12\x0c\xcd\xa6\xa4\xd6\x91\xa2\xe8\xb6\xa2\xfe\x5e\x53\xab\x8b\x5a\x36\x81\xbc\x65\xe2\xe4\x71\x91\x1c\x47\xb7\xcb\x7f\x57\x5b\x07\xb5\x30\x4e\x3b\xba\xf5\x68\x68\x0f\x20\x6c\x88\xd9\xc4\x2b\x96\x17\x35\xef\xe8\x95\x84\x1a\x9d\xeb\xc4\xc4\x68\xd2\x9f\x5f\xfa\xb0\x5f\xf3\x96\x31\x1f\x86\xcc\xb9\xe5\x24\x9f\xf3\xa5\x94\x13\xfb\xd1\xa3\x28\xf1\x58\x74\x99\xa4\xd2\x7e\x36\x78\xb6\xf5\xc8\xaa\x6a\x23\x22\x38\xd7\x6e\xde\x63\xe7\xa7\xbe\x85\x3b\xc5\x3b\x35\x59\x6a\xaa\xa3\x04\xdc\x13\x7b\x58\x81\x9f\xee\x2a\x7a\x9b\xae\xbe\xf2\x57\x5f\x45\xab\xaf\xd2\xd5\x57\xee\xea\xab\x60\xf5\x55\xb8\xfa\xaa\x01\xed\x36\xd0\x50\xb6\xfa\x2a\x5b\x7d\xd5\x90\x46\xb6\x81\xff\x12\x6b\x58\x32\x25\x8a\xfa\xe0\x3e\xb1\x43\x70\x9f\xd9\x01\x78\xae\xed\x81\x77\x6d\x73\xf0\x62\x3b\x03\xef\x87\x2d\xc1\x9b\xd9\x29\x78\x73\x9b\xe1\x3d\x26\x78\x
ef\xec\x04\xbc\x53\x3b\x02\xef\xcc\x8e\xc1\xeb\xda\x53\xf0\x06\xf6\x18\xbc\xa1\xdd\x05\xf7\xc8\x76\x17\xf5\xff\x2d\x5f\xa4\xe6\x3b\xf7\x60\x08\xee\x07\xbc\xad\x23\x33\x72\x91\x60\xf8\x3f\xf5\x78\xa9\x1e\x3d\x4a\x09\xa7\x24\xa4\x64\x96\x50\xcd\xf0\x92\x8c\x12\x46\x49\x98\x94\xff\xc5\x94\x78\x94\x48\x4a\x7e\x7a\x37\x76\xb2\xd0\xec\xed\x0f\xe1\xdc\xc6\x0a\x44\xde\xb7\x9e\xe4\x52\x84\x10\x1b\xc4\xb2\xad\x8d\xb3\x22\x8d\x10\x7c\x5d\x67\x2f\x6b\x44\x2b\x69\xef\x12\x09\xbb\xa4\x14\xab\xbe\x45\xed\x9c\x58\xa2\xa8\xcf\xa9\x6b\xa2\x12\xc5\xbe\xf3\x2d\x22\x96\x17\xb1\x34\xfd\xc8\xc6\xdc\xa2\x20\xfc\xdc\x29\xcb\xbd\x56\xe2\x9d\xf4\x9d\x6f\x31\xb1\xfc\x70\x6a\x51\xe0\xfa\x47\x3a\x61\xb1\x45\x21\xf3\x9d\xaf\x31\x30\xdf\x99\x11\xe9\xc3\x29\x20\x1a\xe6\xe6\x29\xf3\x89\xb5\x9f\x30\x3f\x8c\x2f\xfa\xfd\xbe\x45\xbf\xeb\x70\x36\x9e\xef\x08\x01\x89\xdf\x12\x15\x26\xf9\x32\x99\x70\xf1\x92\xa5\x9c\xd0\x05\x84\xfe\x5f\xb8\x13\xd3\x17\x62\xb9\xf6\x60\xf9\x12\x81\xf9\x75\x5c\xf1\x89\x15\xb7\x60\x81\x9e\x94\x9b\x49\x99\xa8\x69\xb9\xfe\x3d\x03\x29\xa5\xba\x61\x68\x51\x88\x7c\x87\x09\xf0\xfd\xf6\x85\x8f\x7c\x88\xc1\xf4\x20\xd0\xde\x6c\xea\x3b\x9e\x80\xae\xdf\xb0\xc5\xb1\x13\x8f\xac\x80\xf5\xc6\x61\x9c\xa5\x96\xad\x1e\x27\x51\x96\x5a\x25\x22\x0a\x7c\xb5\xc8\x27\x88\x8b\x62\x9f\x58\xae\x8c\x3b\xae\x8c\x7b\x49\x26\xa3\x30\xe6\xbd\x30\x0e\x92\x8e\x9b\x08\x9f\x8b\xde\xa0\x33\x16\xbd\x61\x67\xec\xf6\x86\x48\xe7\xa7\x3e\x58\x63\x26\x2e\xc2\xb8\x17\xf1\x40\x5a\x60\xf5\xb6\x04\x1f\xab\x3d\xd2\x7b\x98\x62\xe7\xaa\xdb\x80\xa1\xb2\x1c\x3f\x31\x16\xbd\x4d\xac\x73\xaa\xb6\x5e\xf1\x5d\x89\x31\xf4\x1b\x6b\x30\x92\xa1\x8c\x14\x08\x5d\xea\x75\xc9\x22\x8b\xc2\x44\x3f\x33\x8b\xc2\x85\xaf\xad\x00\x5b\x97\xe8\x98\xe7\x61\xea\x5a\xab\xfc\x08\x4c\x15\x72\x48\xfe\xcc\xf1\xb5\x05\xf8\x94\xaa\xc7\xef\x68\x54\xe4\xaf\x4d\xcc\x13\x57\x33\x41\x98\xd0\x1f\xda\x83\x19\x8c\x3d\x89\x73\xe1\x93\x0c\xb3\x85\x8c\xf4\x31\x63\xfd\xee\x88\xa0\x37\x47\x91\xd2\x2d\x76\x4c\xe2\x80\x99\x24\x18\xb3\x33\x91\x30\x96\x44\x6e\x58\x1d\x75\x52\x
28\xc5\x54\xc8\x8d\x75\xb0\x86\x68\xaa\x21\x28\xe6\x0d\xa9\x7e\xb5\xf6\xb5\xb1\x24\xf9\x40\x74\x66\xaa\x0c\x3f\x56\xe9\x49\x7f\x60\xc3\xf4\xb6\xf4\x5e\xbf\xc3\x90\xec\x8d\x27\xf0\x90\xe4\x91\x99\xfa\xe9\x24\x0a\x25\x79\xf4\xcf\x74\xe3\xd1\x85\x92\x13\x6f\xcc\x1e\x33\x71\xc1\xa5\x45\xe1\x5a\x6f\xac\xf4\x2d\x0a\x3b\xe6\xf9\xd2\xa2\x70\x64\x9e\x15\xc3\x78\xec\xb7\x5f\x9c\xc7\xe8\xb0\xd1\x67\x43\x4a\x47\x15\x70\xde\x91\xf7\x81\xe7\x5c\xb6\x68\x04\xd8\xfc\xdc\x74\x6a\x00\x6b\xe1\x9d\x85\x02\x59\x3b\xff\xe0\x9d\x5f\xb2\x28\x9c\x30\x52\x8c\xb4\xed\x7b\xea\x70\xb6\x7f\x6e\x41\xe1\xb5\x5e\x95\x48\xe1\x8a\xb3\xa5\x55\xe1\x50\x48\x3a\xed\x99\x17\x44\x35\xf3\x82\x12\x07\x46\xe7\x79\xde\x5d\xcc\xbc\x66\x9f\x63\xe4\x0d\x23\x9b\x56\x72\xad\xa0\x16\x46\xc0\x39\x39\x05\xcd\xfe\x53\xb8\x5a\x7b\x38\xea\xa9\x20\xe3\xc2\x8f\xc9\x7f\x8e\x97\x37\xbe\xf6\x27\xe8\x73\x9a\xa7\xac\x7a\x69\x76\x9c\xb9\x78\xfa\xcf\x7d\x47\x32\xd8\xf3\xdb\x1c\xe7\x20\x76\x76\x09\xe1\xce\x8c\x9c\xf9\x26\xe6\xa9\x84\xcf\x92\x54\x12\xc9\xd0\x6a\x00\xf4\x56\x5c\xf0\xb9\xc0\x05\x31\xc5\x28\xe8\x2e\xe6\xc3\x08\x95\x6c\x82\x2e\xcb\x6e\x85\x71\x7b\xed\xeb\x9d\x53\xb8\x6f\x92\xa4\xa1\x66\x7b\x51\x92\x08\x3d\x4b\xc3\x1c\x8b\xc2\x8b\xb8\x17\x4a\x3e\x4e\x7b\xe8\x22\xde\x89\xc2\x54\xf6\x74\xa8\x7c\xf5\xba\x04\xc0\x89\x42\xaa\x6e\x6f\xbb\x04\x41\x59\x80\xc4\xac\x37\x1c\x60\xe9\x66\xc7\xef\x05\x11\xbf\xe9\xac\x74\x9c\x37\xfb\xa1\x64\x4d\x18\xfc\xf1\x02\x2d\x10\xdf\xab\x93\xe0\xf9\x4d\x12\x84\xc8\xc8\xcf\xae\xfd\x0c\x73\xa2\xa1\x08\x75\xec\xeb\x48\x29\xb6\x82\x34\x8b\x02\xc1\xf0\x9a\x4f\x69\xbf\x3b\xd2\x6f\xec\x37\x4c\x5b\xfd\x7f\xc1\x6f\x9c\x66\x6a\x6d\x8e\xfa\x2e\xaf\x54\x41\x6a\xdd\x72\x0c\x7c\x16\x5f\x70\x51\x39\x08\x6d\xb0\xcf\xe7\xbc\x97\x46\x2c\xbd\x6c\x38\x00\x85\x7e\x40\x91\xfe\x62\x08\xf1\xbf\x7b\x08\x2e\x8f\xa2\x96\x31\x7c\xc8\x8a\xef\x2f\xeb\x38\x8b\x5b\x97\x91\x16\x3c\x1d\x6b\xe3\x43\x48\x3e\x64\xab\xc6\xf6\xa5\x5f\x73\x88\xca\xef\x81\xc9\xe0\xe5\x2a\x3e\x4d\x83\x5b\x25\x75\xe0\xbd\xf1\xca\x2d\xc3\xe8\xaf\xcd\x33\x
8a\xc2\xf8\x7a\x65\x2e\xfb\x61\x7c\xad\x11\x0a\xc1\x24\x5b\x70\x41\x04\xfa\xb9\xe5\x58\xf4\x65\xd1\x0b\x1e\xc6\x4e\x01\x89\x43\xec\x06\x67\xf6\x96\xe9\x73\xc2\xde\xd3\x12\x7a\x0c\x03\xa7\x23\xe9\xa3\xef\x88\x46\x5b\xfb\xfe\x9a\x34\xbf\x45\xfe\x9d\x51\xbb\x7f\x82\xb6\x0e\x2e\x97\x47\x56\x97\x47\x9f\xac\x2a\x73\x92\x2f\xc7\x32\x4c\x48\x7e\x23\x7b\x46\xb7\x64\xd8\x97\x2c\xe5\xa2\x97\xf2\x88\x7b\x8a\x7d\x09\xe3\x50\x86\x2c\x2a\x4a\x7b\xe3\xe4\xb6\x77\x47\x95\x19\x77\xaf\x43\x79\x47\x2d\xb3\x5d\x5e\x12\x29\x99\xd1\xfa\xaf\xc7\xae\x37\xf0\x0b\xba\x93\xf9\x44\x6c\xfc\xc3\xb1\xfe\xb1\x11\x6f\xfc\xc3\xfa\x07\x6e\xc9\x5d\x94\x45\x13\x94\x43\x46\xce\x89\xd6\x39\xc3\xd8\x27\xd6\x1b\x04\xc1\x8e\x3b\xef\xc8\xcb\x30\xed\x44\xcc\xe5\x51\xe5\x2b\xd6\x46\xce\x3f\x2f\x80\x53\xbb\x61\x89\xd4\x67\x52\xee\x25\xb1\xcf\xc4\x7c\x75\x45\x55\x1f\x1f\x13\xd9\xc1\x05\x37\xe7\xe1\xbb\x42\xe0\xde\xaf\x5f\x18\x21\x1a\x63\xe0\x30\x67\x3d\xfa\x19\x0e\x72\xfc\x33\x27\x5d\x85\x7f\x18\x5a\x24\x49\xe7\xab\x20\x18\x31\x0a\xef\x4c\x46\x67\x92\x48\xc5\x9a\xe0\xf1\xb2\xec\xa5\x9f\xa9\x05\xd2\xa9\x8d\x7f\x1c\xe9\xd1\x0e\x8a\x65\x9f\x5d\x86\x92\xf7\xd2\x09\xf3\xb8\x05\x56\x9c\xcc\x04\x9b\x54\xa6\x22\xf5\xf0\x97\xa0\xea\xb4\x8e\x85\xc7\x6e\x6f\xcb\x40\x7d\x22\x81\xc1\x2e\x49\x74\x14\x1d\x31\x9a\x91\xcb\xa2\x5a\x89\xe1\xcd\x10\xf2\x73\x32\x23\x7b\x3e\x60\x72\xd3\xac\x38\x27\xfa\x50\x7c\xf4\xdb\x6f\x08\x30\xf7\xad\x3a\x27\xd3\x04\xa6\xda\x72\xd3\xaf\x9a\xf8\xe8\xe3\xa3\x7d\xa3\x62\x93\x8b\x2b\xae\x5e\xd0\x54\xa7\x30\x89\x7a\x8f\xcd\x80\x76\x65\x1b\x1d\x74\xc9\xbe\x8f\xa7\x12\x93\x14\xba\x78\xbf\x13\xe2\x25\xcb\x1e\x66\xe5\x5f\xa0\x9e\xa3\x62\xb1\xef\xde\x22\x97\xcb\xb0\x82\x83\x48\x8c\xa9\x26\xcf\x97\x3b\x9c\x91\x8f\x3e\x86\xa6\x52\x44\x16\x14\xd6\xcb\x0a\x95\xe8\x27\xa6\xc0\x49\xef\xa8\x86\xa9\xb4\x13\x24\x59\xec\xa3\x41\xb9\x27\xee\x10\x3f\xdf\x07\x46\xfc\x3c\x55\x12\x10\xb1\xbc\x4b\xee\x5d\xe3\xe1\x7e\x67\x24\xaa\x78\x92\x29\x1e\xf3\x93\xe1\x9a\xf4\x71\x80\x43\xbf\xb4\x2a\x35\x7c\x28\x14\x8d\xbf\x53\x54\x45\x
9d\x18\x36\x75\x3e\x51\xbc\xc8\xad\xdf\x2e\x3f\xe7\x3c\x81\x5a\xe9\x98\x4d\x91\x3c\x97\x18\xe8\x53\x09\x48\x52\x81\xa7\x82\x51\xad\x34\xef\x79\x49\x2c\x45\x12\x15\x3f\xd5\x00\xdc\xe4\xa6\x6c\xfb\x4e\x33\xb3\xbe\x99\x19\x96\x21\x83\xb1\xdc\x41\x2f\x9f\xe6\xa9\x5f\xe6\x7d\xa4\x14\x3e\x33\x7d\x5b\x23\x41\x64\xd4\xa0\xff\xea\x79\x59\xe9\xc5\x0f\x3d\x54\x6a\xdd\x5d\xd7\xe7\xa9\x27\xc2\x09\x32\x3f\xe5\x79\x8a\x0d\x72\xd1\xe0\xfd\xca\x5f\xef\x7f\xd9\xbe\x6a\x26\xc7\x51\xf5\xfb\xaa\x0a\xd2\xb2\x0a\xc7\xce\x4b\xfe\xdb\x60\x59\xe6\x5d\x2b\x28\x8a\x7d\x0b\x2c\x29\x58\x9c\x4e\x98\x40\x2d\xb3\xc1\x07\x41\x12\x6b\xec\x7c\xc9\x45\x58\xbe\xf6\x32\x91\x22\x5e\x9e\x24\x61\xac\x55\xd4\xba\xc0\x20\x5c\xc4\x1d\x31\x37\x8b\x9f\x0f\x45\x63\x60\xbc\xb6\xc2\xc1\xe8\x59\xbf\xf5\xef\x19\x97\xf8\x8d\x86\xd9\xc2\xf4\x8c\xc2\x81\xef\xfc\x83\xc7\x53\xa7\xaa\x50\xfd\x07\x7c\xd0\x80\x18\xaa\x1a\x5f\x7c\xe7\x77\x78\xe1\x3b\xc3\x2d\xf8\x81\x42\xb0\xd4\xac\xed\xb5\x84\xae\x44\x77\x77\xf8\x7c\x0f\xd5\xc1\xb0\x50\x1d\xbc\x6f\x3a\x09\x3a\x4a\x94\xb1\xb3\xfe\xaa\x6a\xa4\x09\x58\xd7\x7c\xfe\x32\xf1\xb9\x05\x18\x29\x1e\x4f\xa7\x71\x33\xf4\x0b\x9f\xc0\x6e\x50\xf5\x0b\x4c\x82\xd2\x73\xef\x9b\x5f\x78\xee\x79\x42\xa7\x98\x8e\xa7\x5a\x07\x34\x66\x91\x3a\x94\x62\x8a\xf3\xd4\x1f\xa7\x20\xa7\xad\xd1\xc2\xbc\x73\x2d\x39\x5f\xeb\xb8\x38\x57\x98\x44\x95\xed\x83\xe7\x7c\x53\xd2\x36\x24\xce\x09\xd3\x07\x21\x33\x91\x90\x5d\x1f\x83\x62\x79\x0a\x6d\x87\xce\x0f\x9f\x70\x0a\x81\x73\x41\x3c\x5c\x32\x93\xe5\xe4\x1b\x03\x94\x45\x4c\x56\x19\xcb\xb2\xbd\x7e\x77\x64\x5d\xb2\xd4\x30\x90\x96\x8d\x3f\xd2\xcc\xf3\x78\x5a\xd5\xa1\x94\x98\x56\x24\xb3\x4e\x9c\xf4\x2e\x32\x29\xb9\x48\x5b\xf8\xf5\x5d\xcd\x1a\x32\x4f\x7d\xaf\x46\x6d\xbc\x24\xea\x58\x1b\xa2\xd0\xae\x84\x71\x6f\x16\xfa\xf2\xd2\x02\x39\xb2\xb6\x06\x83\xc9\x8d\x65\x5b\x9b\xf8\xb7\x41\x62\x68\xfc\xbc\x3a\xb3\x3c\x96\xbd\x54\x0a\x2e\xbd\xcb\xa6\x76\xea\xab\x88\x44\x7a\xe6\x7a\x6f\x19\x03\x7d\xf0\x9b\xd3\x8a\xe2\x71\x08\x12\x51\xe0\x05\xdc\x46\x4c\xba\xe5\x91\x9a\x03\xaf\x3a\xd3\x
2f\xfc\x8a\xab\x73\xe3\xf6\xe4\xe9\xd3\xbe\xf8\xd4\x64\x59\x7b\xe0\x38\x59\xde\xe8\x3d\x53\xf5\x9e\xc7\xce\x39\x61\x10\x56\x5c\x07\x8d\x65\x49\x77\x94\xb9\xf6\x8c\xbc\xf5\xe1\x01\xf2\xd6\x7d\x56\x50\x9a\xcc\x5d\x50\xc8\x3c\xd2\x92\xf7\xf8\x8b\x4f\x47\xaa\xf7\x21\xb5\xb1\xa6\xf4\xc8\x57\xd6\x80\x06\x2b\x8b\xd4\x73\x65\x5c\x2e\xd4\x2a\xb3\x36\x11\xe1\x98\x89\xb9\xa5\x4e\x3a\x09\x28\x24\x0d\x5c\x98\x62\xf4\xe4\xa8\xbe\x13\x5e\x12\xf5\x58\x26\x93\x4e\xed\x6b\x8a\x78\x6c\x36\x6d\x5f\xe3\xd6\x4d\xee\x62\x1f\x95\xdc\xe0\x19\xec\x75\x87\x34\xd4\x4b\x56\x64\x08\xe3\x7e\x53\x99\x49\xc9\xdb\xc0\x8c\xc4\xd3\xa2\x2f\x05\x1f\x8a\xf3\x5e\x62\xbf\xcb\xc5\x78\x89\x94\xa5\x63\x80\x0a\x3a\xbc\x7f\xd1\xb7\x9a\xf9\x5f\x24\x00\xc8\xa1\x8e\x0d\x6c\xbb\x2c\xe5\x88\xa1\x11\x17\x7f\x65\xe4\xc0\xa7\x65\xdf\x07\x7e\x39\x3a\x93\x1e\x83\x4f\xef\xa9\x93\xcd\xa6\x0d\x36\xa7\xb9\xb6\x89\xd2\x51\x11\xdf\xce\x5a\x51\xa6\xaa\x21\xfb\x22\x99\xf8\xc9\x4c\x1f\x7e\xad\xfa\x44\xa4\xc4\xa7\x18\xac\xa0\x32\x48\x61\x08\x07\x9b\xae\x63\x31\x4a\x36\x22\x78\xd2\xf1\x43\xb7\x33\x76\x37\x3b\x63\xd1\xa8\x19\xf0\xb8\x26\x62\x6b\xd9\x88\x53\xf5\x65\xc5\x22\xc8\x06\x30\x9f\x54\x00\x4d\xed\x11\x4e\xa0\x20\xec\xde\xf4\x9e\x24\x2e\x99\x3a\xbf\x43\x38\x75\xb6\x07\x10\x4c\x15\xd1\x72\xa7\xce\xd6\xef\x90\x4e\xef\x99\xee\x3c\xb7\x58\x2a\xf2\x9d\x5f\xe8\x54\xa3\x05\x16\x39\xcd\x88\xa4\xd5\xac\xe7\xd1\x74\x25\xeb\x79\x4e\x35\xd8\x39\xde\xfb\xbb\x11\x6a\x68\xdd\x31\x32\xaf\xde\x54\x1b\xc5\xee\xa3\xcb\xc4\x24\x00\x4d\x1b\x74\x3c\xcf\x36\x32\x9a\x4e\x73\x7d\xd1\x3b\x89\x01\x6f\x33\x34\x3a\x16\xa3\x44\x61\x1f\xbd\xc3\x52\xdf\x8e\x4f\x02\x38\xcd\xfe\x4a\x8f\x1c\x7b\x6b\x22\x2d\x77\xe0\xe7\x7a\x1e\xc6\x16\xdc\x2c\xdb\x70\x73\x38\x2d\x56\xd5\xf7\x48\x58\x60\x62\x77\xe9\x7d\x52\xbc\x0f\xca\xf7\x4a\x10\x9a\x04\x90\x18\x6c\xce\xa7\x88\xcd\x33\x54\x8f\xe6\xab\x91\x37\x4b\xa6\x15\xc4\x2e\xf3\x1e\x22\xaf\x40\xec\x3f\x7c\xc2\xd0\xd3\xa4\x44\xee\xc6\x8d\x58\x2f\xaf\x37\xd5\xc8\x9d\x55\x90\x7b\xb2\x1e\xb9\x27\x53\x3a\x52\x5f\x18\x52\x3b\x31\x
c8\xdd\xf5\x50\xd3\x95\xe2\x77\x15\xb7\x82\xcc\xa1\x97\xa5\x16\xa4\x5c\xed\x29\x85\xa9\x47\x52\x6c\xf4\x6f\x24\x03\x02\xb1\x40\x4c\x9b\x49\xc1\x82\x82\xdf\xce\xf5\x18\xf8\xf5\xa6\x90\x39\x0f\x50\xd6\x96\x10\xf7\xbd\x3d\xaa\x24\xef\xcf\x41\x6e\x8f\xea\x29\x29\x59\x31\x2f\xd9\x1a\xd6\x05\x63\x68\x35\x88\xb8\xe8\xa5\xc4\xa6\x60\xbd\x8e\x51\xef\xa3\xb9\x7e\x2d\x99\xa1\x79\x8f\x00\xcf\xab\xba\xbb\x89\x51\x29\x34\x4b\xf8\xbb\x9c\x50\xb7\x9d\x13\x62\x0d\x9c\x50\x9d\xff\x89\xa6\x28\x0a\x03\x2e\x43\x45\xfb\x0b\x5c\xc9\x10\xf7\xa7\x44\xfb\x4a\x3c\xec\x5c\xf3\x79\x27\x48\x44\x31\xe9\x5c\xdf\x60\xf4\xfd\xff\xa6\xee\xfe\x25\x3a\xe7\x7a\xcb\x4a\xee\xf2\x9b\x95\xb7\x86\xbd\x20\xdc\x21\x99\x13\x53\x85\xf1\x62\x27\x53\xf8\xaf\xba\xca\x8a\xcf\xf0\x92\xf1\x24\xe2\x92\xf7\xc6\x3c\xce\x3a\xd6\x06\x21\x59\x9f\x6d\xfd\xfa\x95\xf5\xdd\xd7\xf4\xe1\x43\x75\xf4\xac\xf4\x32\x99\x29\x5a\xa7\x18\x68\x8f\x24\x78\x6e\x28\x04\xfa\xb1\x72\xa1\x21\x57\x29\xa0\xea\xb5\x54\x87\x64\x53\x85\xe3\xb4\x62\x30\xa7\xcb\x6a\x65\xe5\x82\xc2\x74\xea\xfc\xec\xda\x4f\x16\xd0\xbd\x83\xf8\x36\x12\xd9\x52\x32\xb4\x9b\xca\xd7\x49\x7b\x27\x8c\x8c\x3d\x22\xfa\xde\x97\x15\x7d\x50\x9c\x93\xe9\xca\x2d\xe5\xd4\x64\xcb\xba\x6c\x20\x84\x1a\xe9\xc7\x0a\xed\x97\x09\x9b\x5b\x93\x6b\x5e\xcb\x3b\x52\xb4\xc6\x7d\xef\xcb\xa2\x10\xf2\x3e\x4b\x1d\xec\x40\x2c\xeb\x8a\x57\x25\x69\xc5\x2a\xaa\xad\xc5\x49\x87\x02\x2c\xc9\xdc\x77\xb1\xcf\x6f\x30\x93\xd0\x90\xe6\x6b\x51\xb9\xc1\x10\x3c\x62\x7a\x01\x5b\x84\xdf\xe5\x2d\x56\x4b\x36\x35\xf7\xb9\xa8\xd8\x10\xbd\xed\x35\x4b\x5c\x82\xe8\xa1\x31\x06\xb4\x3b\xd6\x86\x36\x33\x43\xf3\x1f\x51\xc1\x82\x93\x69\xbb\xb5\x92\xbb\xff\xeb\x97\xe8\xbb\xaf\x47\x0a\xb0\xa5\x23\xa8\x42\x8c\x98\x0c\x6c\x0f\xf5\x8e\x9a\x86\xa3\xe9\x65\xf4\x97\x08\xb9\x34\x84\xdc\x2b\xc8\xb6\xbc\x7f\x7b\xa9\xdb\xae\xdb\x0a\x5c\xa1\xce\xea\x79\xc3\x93\x85\xa7\xea\xb2\x3c\x55\x97\xfa\x54\xfd\x0b\xbb\x54\x72\x06\xf5\x04\xb3\x8a\x0d\xe0\x48\xdf\xa6\x53\x24\x86\x17\xde\x0a\x53\x50\xa1\x9e\xe1\x94\x8e\x26\x1e\xf1\xa8\x6d\xd8\x01\xf5\x8b\x
e9\x5f\xc1\x94\x8e\xc6\x9e\x96\xb5\x39\xb4\x05\x7a\xd4\x30\x9c\x51\x4a\xed\xd7\xee\xa2\xb0\x01\x28\xc1\xc5\xf8\xf8\x5b\x60\xb9\x51\xe2\x5d\x97\xea\x5b\x83\xef\x87\x83\xc1\xff\x55\x2a\xa5\x5a\x50\x4c\x67\xe9\x57\x4f\x84\x17\x97\xb2\x44\x3b\x5d\x14\x4b\xa5\xc6\x37\xf6\x8c\x5c\x4e\xd1\x7d\xc0\x7d\x41\xcb\x8b\x7b\x60\xe0\x2d\x28\x5c\xac\xa1\xc1\xef\x34\x0f\xc9\xf4\x2d\x7f\x57\x5f\xf3\xef\x68\x17\xe0\x13\x40\xc7\xa6\x37\x3a\x46\xde\x6b\xbc\xb1\xf0\x7e\xac\x57\x2e\x7b\x4c\xf8\x9d\x2a\xf9\xad\x17\xf6\x2e\x39\xf3\xab\xcc\xfc\x65\x15\xc0\x3a\x0a\xc8\x24\x73\xd3\x4e\xa5\x2e\xbe\xc8\x1b\xdc\x90\x57\x3e\x0c\xc0\xd3\x28\x64\x9f\x61\x52\x48\x73\x12\xdf\x18\x43\x49\x45\x26\xb0\xde\xb0\xb1\x9e\xe8\xb3\xcb\x91\x85\x26\xbe\x1d\xa2\x39\x02\x6a\xd9\xfa\x45\x2e\xee\x79\xfe\x52\xd6\xc9\x19\x99\x4c\xb5\x1e\x99\x51\x0a\x73\x72\xeb\x43\x2e\x30\xfa\x16\x88\xfe\x17\x38\x65\xf9\xfb\xf2\x72\x0f\x44\xff\x08\x3e\x16\x05\xfa\xc6\x0d\x44\xff\x18\x76\x59\x4e\xce\x56\xd6\xc7\x40\x8c\x9e\xad\x37\x2a\x87\xc2\xf3\xa1\xf8\x53\x54\x6a\xb3\x4b\xdc\xf8\xbc\x58\xe6\xc5\x72\x0a\x3f\xbd\x73\xfb\xc1\x00\x41\xb4\xfa\x99\xd3\x36\x9e\x2e\x17\xa4\xcb\x8b\x92\x15\x03\x99\x2d\x4d\x55\xcc\x7d\xc7\x26\x30\xfb\x41\xbc\x30\x64\x32\x1e\xad\xb3\x34\xd8\xaa\x4a\xd9\x2f\x93\x28\x62\x93\x94\x77\x58\x14\x19\x05\xb9\x45\xbf\xdb\x6b\x2c\x07\x96\x9a\x6b\xf3\xca\xe5\xc6\xf9\x04\x43\x1f\xe6\x64\xd7\x87\x04\x42\xc5\x3c\x49\x83\x7b\x67\x5a\x2b\x77\x39\xb4\x28\xcc\xa7\x4b\x16\x59\xb3\x69\x69\x91\x15\x27\x32\xd7\xd4\x9b\x1e\x6f\x74\x43\x44\x32\xa9\xb6\xec\xd0\x1a\xbd\x4b\x39\x8e\xde\x24\x8a\xcc\xee\xdc\x53\xc4\xbd\x4b\x53\x2e\x98\x1f\x26\xff\x92\x9a\x5c\x20\x16\x8c\x11\xfd\xc9\xff\xa4\x6a\x7c\x41\xe1\x68\x6a\x4c\x76\x8f\xa7\x26\xec\xe4\x6b\x7c\x18\x0e\x16\x70\x86\x4f\xbf\x2f\xe0\x0a\x1f\xb6\x17\xf0\x72\xda\xea\x0c\x5c\x11\x9a\x07\x7f\x38\x18\x85\x5c\x47\x20\x57\xf2\x8b\x59\xd6\x77\x92\x64\x1a\xc5\x79\x4e\x0c\x89\x23\x20\x74\x24\x04\x0e\x07\xd7\x31\x17\x5a\x19\x85\xd4\x31\x81\x86\x05\xe6\x2e\x7e\x1e\x3b\x1e\x08\x27\x01\xe9\x84\xc0\x9d\x
00\x32\xc7\x05\xe6\xa4\x4a\xc2\x3e\x9f\xae\xf5\x0f\xeb\x04\xe4\xe5\x14\x3d\x46\xbe\x49\xcc\x2b\x84\x6e\xee\xa7\x30\x27\x5f\x53\x0c\x5b\xa8\x83\x2c\xee\x4d\xdb\x0c\xf2\x4c\x4e\xb5\x5a\x72\xcc\xe1\xf6\xf6\x80\xd2\x95\xf4\x59\xb5\x28\x01\xdb\xf5\xc4\x64\xb5\xd0\x10\x4f\x6a\x3e\x8f\x83\x7a\x3a\xb2\xba\x71\xdf\x26\x66\x04\x69\xa3\x07\x13\x8c\x99\xce\x9d\x29\x39\x9f\xc2\x70\x08\x18\x73\x57\x02\xc9\x9c\x1d\x45\x13\xf7\xa6\x26\x62\x70\x11\x4d\xf8\x69\x4f\xfb\xb1\x8d\x32\x7b\x88\x3a\xe6\xa7\x76\xd6\x1b\xd2\x3c\xb0\x30\x64\x4e\x19\x69\x38\xa6\x10\x3b\x24\xae\x76\xa5\xd5\x0c\x45\x68\x62\xd3\xd7\x53\xc7\x71\xe2\xd1\xc0\x8e\xb1\x4f\x7c\x32\x7d\x66\x14\x96\x86\x16\x57\x55\x0b\x72\x79\xd8\x19\x48\x8a\xb6\x9f\xbb\x24\xdf\xa3\x8f\xad\x5c\x18\x46\x1d\xf9\xe3\xe9\x48\xd8\x33\xf2\x71\xaa\x44\x18\x17\xd3\xe0\xc1\x8c\x7c\x4a\x40\xe8\x53\xe9\xeb\x77\x46\xef\xb6\x8b\xf0\xbc\xb5\x80\xd3\xe9\x9d\x96\xa8\xbf\x7e\x19\x9f\x39\xe3\xc9\xb2\x14\xbd\x75\x41\xe1\xdd\x52\x17\x12\x93\x04\x19\x4b\x81\x0b\x72\xe4\xa9\x4d\x39\xf2\xd4\x21\x18\x59\x78\x0d\x3e\x4e\x62\x79\xa9\x84\x17\xc8\xda\xef\x46\x8a\xa8\xc0\x71\x11\xa8\x56\xd0\xb5\xc1\x92\x1f\xc4\xfd\xee\xc3\x87\x43\xb4\x52\xcf\xd0\xa0\x83\xd3\x91\xb0\x2d\x6b\xa1\x99\x48\x1c\xf0\x15\x58\x1d\xae\xb0\x24\x2a\x98\xf0\x95\x00\xab\x33\x4e\xb2\x94\x27\xe8\x12\x83\x7a\x22\x2c\xb8\x05\xab\x63\x64\x63\x88\x9b\x83\xac\xb1\x5b\xec\xb4\x2d\xc0\x9a\x65\x15\xf7\xb3\x2e\x5a\x7b\x9d\x13\x33\x36\x1c\x1d\xa8\x1f\x02\xc3\xaf\x63\xa0\x69\xf6\xeb\xd7\x26\x06\xcb\x75\xcb\xf6\xe6\xa1\xe3\x72\x39\xe3\x3c\xb6\x16\x84\xe6\x7c\xf5\x39\xc1\xdc\x0b\x8a\x3e\x9f\x4e\xa1\x35\xff\x5a\x08\xe8\x8c\x5e\x0e\xb6\x51\x9d\xe5\x33\xc9\x3b\x2e\xf3\xae\xad\x0d\xc2\xfa\x4c\xfd\xe3\x6e\xc4\xb4\x51\x92\x54\x55\x03\x91\xc4\xd2\xda\x48\x36\x48\xb8\x41\xbc\x0d\x13\x0d\xce\xf7\xc1\x2a\xd7\x12\x52\xf4\x3a\xcf\x13\x23\x2b\x02\xbc\x3b\x2d\x71\xf0\x99\x02\xa5\x0f\x18\xef\x9c\xd3\x8a\xb8\xf1\x69\x2d\x59\x5a\x62\xdb\x4e\xf3\x3b\xfd\x77\xd3\x3c\x57\x98\xe9\xe4\x70\xda\xea\x39\xba\x8f\x62\x02\x06\xcd\x52\x3f\x
3f\x4e\x41\xc2\x29\x6d\xbb\x3e\xba\xca\x52\x19\x06\xf3\xe2\xda\xa6\xae\xcd\xad\xd8\x87\x71\x7e\x5d\x61\x0f\x71\x64\xc7\x1e\x1c\x9a\x88\xda\xde\xcd\xe8\x4f\xeb\x28\x53\x52\xc3\x07\x94\x1d\x8e\x33\x25\x26\x7c\x55\x4c\x95\x75\x7c\x99\x59\x60\xbd\x11\xa1\x05\xd6\x11\x93\xd6\x77\xfb\xcf\x7b\xd4\x42\x57\x0c\x63\x1a\x5b\xdb\x20\x1c\x2f\x0b\x63\x73\x7b\x5a\x6e\x4c\x26\x71\x5f\xae\xa6\xb4\x62\x12\xf1\xa9\xbe\x7c\x27\x88\x1f\x06\x0b\xb8\x9d\x9a\x20\x9c\xaf\x34\x3f\xe1\xaa\xfe\xde\xea\xe7\x89\x45\xe1\x4d\xfb\x66\xfd\xec\xda\x4f\xaa\xd9\xad\x31\x10\xc6\xda\xea\x8f\xeb\xd5\x3f\x98\x3b\x49\x29\x92\xf8\xc2\xa2\x6b\xf2\x61\xaf\xc4\x21\x31\x29\xd5\xe2\x7e\x77\xf4\x36\xb1\x53\x4e\xb5\x07\x9e\xc2\x8e\xbc\x48\x42\xd8\x6e\xd2\x90\xa7\x4d\xaa\x64\x4c\x22\x96\x57\xc4\xe2\xf3\x92\x78\xca\x85\xec\xa4\x52\x84\x7a\x64\x53\x7d\x73\xfb\xde\xf0\xb3\x73\x0e\x9e\xa0\x14\x5e\x4c\xdb\x7c\x57\xeb\x80\x26\xc3\x31\x9f\x84\xde\x75\x15\xb8\x3e\x14\xda\xad\x34\x73\xaf\xb8\x27\xab\xd6\x32\x23\xeb\x75\xec\x5b\xb6\x75\x94\xeb\xef\x96\xa1\xe0\x32\xc9\x44\x93\x26\x34\x9b\xf4\xb4\x49\xbc\xb9\xab\x28\x20\x5a\x9d\xcf\x39\x79\xa3\x8e\xc3\x00\xd6\x18\xc5\xb2\xf8\x22\xe2\x3d\xa3\xf6\x3e\x35\xdf\x7d\x67\xee\xfd\x15\xa0\xb9\x51\x26\x2c\xbd\xd0\x33\x72\x80\xfd\xa9\x05\xa2\xc8\xe4\x55\xf0\x68\x18\x10\x99\x63\x53\xd1\x67\xd7\xd5\xd8\xa3\xd6\x00\x2d\x8c\xf2\x54\x0a\xda\x71\x02\xab\xed\xdc\x59\x8d\x50\x0a\x67\x1e\xd9\xd4\x5c\xe7\x34\xe4\xb3\xfa\x54\x73\xae\xb1\x72\x41\xa4\x24\xd5\x7b\xac\x4b\xef\xce\x85\x51\x3d\x95\x4b\xb3\x22\x24\x25\xd1\xea\x17\x4a\x6d\x8c\xbd\xba\x8f\x4a\x12\x91\xfc\xef\xee\xe4\xf0\xdf\xbc\x93\xc3\xbf\xbf\x93\x3f\xee\xb7\x93\x3f\xd6\xee\xe4\xdf\xdf\xbb\xe1\xbf\xbc\x77\xea\x84\xb6\x40\x93\xde\x3e\x12\xb7\xa3\x27\xed\xc0\xd9\xa6\x93\x51\xe8\xc6\xb2\xec\x19\x99\x27\xf0\x3b\x48\x64\x7b\x16\x06\x49\xc9\x91\xe1\xb8\xae\xd5\x3f\x57\xa8\xf5\x56\x4f\x3b\xea\x9f\xdb\x82\x72\x22\xab\x11\x2f\x28\xfc\x98\x36\x85\xb4\x37\xee\x48\xe5\x85\x7f\x83\xa1\xe1\xb8\xe3\x5e\xf4\x02\xe6\x73\x7f\xd5\xf0\x70\x13\x2d\x9f\x5b\x0c\x
35\x25\xbf\x91\xeb\xac\x34\x97\xcb\x1b\x4d\x34\x75\x95\xe5\x6b\xd8\xe7\xf1\x72\x2c\xec\xe0\x6f\x0e\xf9\xa4\x6a\x92\x5b\xce\xbb\xf1\x9e\x40\x94\x8c\xc8\xe7\x69\x6b\x42\xd9\x36\x63\x62\x1d\x05\xc9\xd1\x51\x90\x30\x9a\xa8\x8b\xd6\x9d\xbb\x84\xc8\x3b\x2c\xe3\x73\x13\x3f\x34\x72\x97\x7d\x77\x95\x4f\x5b\x6f\xe5\xfe\xf7\x6c\xd8\x2b\x16\xeb\xc2\x58\xac\x0b\x63\xb1\x7e\xec\x23\x53\x50\x58\xa2\xa3\xcb\x42\xcd\x12\x5d\xc9\x00\xda\x62\x79\xd9\xef\xe3\xfe\x16\xcb\xa2\xdd\x62\xf9\xc7\x14\xdd\x46\x78\xbe\x1f\xef\xa7\xed\xc7\xab\x62\x5d\xc9\x06\xfa\x73\xe5\x97\x3e\xa3\x6e\x41\xdb\x26\x7e\x9d\xde\xc7\xf7\x6c\x25\xc9\xcb\xca\x22\x9a\x85\x93\x7d\x77\xf4\xe7\x8c\xbc\x9d\xe6\x5c\xe9\x87\x8a\x56\x66\x27\x0f\x92\xaa\x2f\x99\xec\x0e\x3a\x01\x7e\x15\x98\xe0\xa4\x40\x30\xef\xa7\xe8\xee\x81\x6a\xa4\xe6\x7e\x3e\x26\x65\xbc\xd5\xca\xf5\x97\x39\xf7\xcb\x42\xb8\x5e\xc7\x7a\x1e\xeb\xd2\x4f\xae\x75\x56\xda\xea\x13\xff\xed\xcd\x98\x88\x91\xaf\x59\xb6\xcd\x55\x2c\xd9\xb7\x69\x7b\x2c\xc1\x06\xef\xbd\xea\x70\x06\x65\x70\x23\xcf\x87\x97\x1e\xcc\xc8\x57\x3d\xf7\x7a\x06\x1f\xe3\xc5\x57\x97\xff\x99\xaf\x46\x10\x77\x1d\x6b\x2c\x7b\x8f\x2d\x10\xdd\x15\xf3\x82\xd5\x89\xed\x92\xb8\x0b\xc6\xfb\x65\x64\x75\x96\x2e\x61\x2d\x5a\x33\x0c\xbd\x9e\x36\x9b\x56\x55\x18\xb2\x62\x57\x3e\x98\xe4\x82\x1d\x4d\xb3\x71\xc3\x71\x73\xca\xcd\xca\x0d\xbf\xd3\x8a\x75\x8e\x56\xcf\xbe\xc8\x2a\xba\xce\x21\xa2\x7a\x58\x26\xa8\x5a\x14\x2c\x10\xb6\x59\xc8\x82\xc6\x36\x6d\x61\xd5\xbe\xa0\x17\x70\xee\xa3\x40\x57\xd3\x7a\x2d\x88\x39\x4e\xb2\x6b\x58\x7b\xde\x75\xd6\xaa\x55\xcd\x2d\x35\xe2\xd1\x13\xb7\xc2\xad\x7c\x16\x9a\x1e\xee\x94\x30\x09\x59\xf7\x3e\x87\x54\xe1\xa7\x8e\xb5\x11\x77\x4d\x6f\xbc\x5b\x73\xe3\x40\x96\x00\xbd\xba\xb9\x65\x5b\x5f\x30\x73\x64\xa3\x65\xcf\x38\xea\x6d\x76\x56\x6f\xf7\x4f\x18\x91\xdd\x95\x99\xc7\xcb\x8c\xc3\x52\x5b\x03\x14\x5a\x4f\xbc\x74\x41\x86\xea\xd3\x82\x2a\x30\x5c\xb6\x67\x0b\xf0\xba\x46\x87\x92\x74\xef\x48\x51\xc2\xfa\x8c\xe7\x91\xe6\xbc\x4a\x94\xf0\x26\x39\x5c\xdb\x0f\x69\xfe\x22\x89\xb2\xb1\x4e\xa0\x60\x4c\x
d9\x2b\x46\x03\x1d\x6b\x43\x56\x81\x77\x15\x6d\x88\x12\xc5\xe0\xad\xa9\x98\x2a\xa1\xfe\x0c\x95\xac\xfa\xc2\x29\x5b\x35\x4b\xe9\x2c\xc1\x50\xb1\xf5\x39\x4b\xb4\x72\x90\xef\x3b\xf2\xff\x99\xd1\x56\x06\x96\xa3\x0a\xed\xbd\xe2\x35\x1f\x90\xbb\x57\xd7\xf8\x26\xfd\x27\x16\x37\x07\xb8\xd5\x8b\xb4\xbb\xce\x71\x92\x43\xe3\x82\x42\xd8\xbd\x43\x5b\x52\x11\x31\x2b\x07\x2e\x20\x49\x17\xde\xf9\x60\xa4\x47\x50\x82\x49\x2f\xba\xe8\x6d\xa3\xb9\xe3\x13\x13\x39\x66\xdf\x83\x1b\x8f\xc2\x8d\x47\xbc\x2e\x85\x98\x42\xd1\xec\x95\x49\xef\x51\xb6\xdc\x5a\x6a\x79\xbe\xd4\x52\x56\x5a\xbe\x46\x73\xf0\xc6\xcf\xed\x2d\x35\x12\x0d\x8b\xa2\x37\xca\x34\x1e\x9a\xc6\x2b\x3b\xa5\x8e\xed\xff\xf3\x7f\x5b\x2b\x8e\x3b\x4b\x7b\x51\x47\x73\x06\x77\x74\xf4\x65\x79\x6f\xc9\x1a\xe1\xc6\x23\xac\xdb\x2e\x3c\x78\x2c\xe2\x8a\xeb\x5c\x12\x1d\xbe\xd3\xbb\x9c\x26\x42\x01\x8f\xfe\x9b\x24\xf1\x2f\x35\xae\x1d\xac\xd1\xa5\x8f\xc2\xbe\xe4\xa9\x24\xb1\x13\xd3\x91\xe5\x33\xc9\x7a\xd6\x46\x6c\xc7\xf0\xe8\xbf\xff\x99\xfe\x46\xae\xd8\x94\xe9\x0b\x0d\xfb\x97\x2a\xb4\x15\x13\xfb\xcf\x47\x97\x72\x1c\x15\x4d\x85\x23\xd0\xe4\x08\x43\x76\x07\x0a\xb9\x7b\x02\x74\x57\x17\x82\x8d\xc7\xe7\xdc\x0f\x31\x58\x46\x1e\x31\x0a\xdc\x6e\x9b\xd7\x70\xee\x2e\x2c\xfe\x19\xff\xfa\xa7\xf8\xf5\xcf\x58\x7b\x0d\xa7\x5d\xed\x1b\xca\x6f\x24\x13\x9c\x59\x14\xa2\xee\xda\xbc\x53\x98\xd5\x7f\x0b\x86\x8f\xe1\xab\x20\x6e\x57\x67\x31\xc5\xeb\xfd\xec\x7f\x06\x39\x8a\xf5\xe7\x37\x2e\xcf\x6f\xda\x35\x07\x38\xcb\x0f\xb0\xc4\x03\xcc\xef\x8d\x1d\xe1\xa3\x47\x98\x5a\xea\x7f\x03\x9a\xfc\x1f\x19\x77\xd3\x10\xff\xdd\x08\xf3\x3f\xb0\xe0\x05\x89\x5e\x9a\xcc\xdf\x46\x9d\x7e\xb7\xd1\x23\x46\xc7\x7f\xe5\x7d\xf6\x18\x30\x5f\x5a\x17\x3c\xf5\x67\x00\xe8\x14\xf9\x11\x42\xf5\x6b\x0f\xb3\xa8\xb1\x57\x18\xc2\xaa\x3b\x3a\x27\xd6\x47\x3e\xcb\x93\x20\x28\x74\xf6\x1a\x1d\x7d\x30\x42\xad\xf5\xda\x0f\x65\x59\xf6\x96\x13\x13\x30\xaf\x1a\x90\xac\xe5\x66\x37\x17\xb0\xe6\x24\xec\x42\xd6\x77\x0f\x20\xeb\xb3\x53\xc8\xfa\xa9\x9a\xb7\xe8\x82\x07\xac\x82\x5c\x8b\x38\x38\x71\xd7\xe8\x21\x0b\x94\x7a\x
3c\xc5\xf4\x9c\x27\x14\xa6\x24\xea\x82\xf5\x32\xcf\xaf\x9d\x57\x9d\x15\x55\x8f\x74\xd5\xe3\xa5\xa5\x2d\x48\xc7\x9c\x7c\x9b\x02\x87\x44\x07\x5d\xca\xba\x28\x25\xaa\x31\x5e\x29\xa2\xa7\xd1\xde\xb4\xeb\x70\x01\xdd\xae\x93\x09\x18\x37\x22\x97\x58\x90\x40\xa0\x9a\xc3\x44\x94\xb8\xec\xde\xd3\x8c\x79\xd2\x5a\xd1\x63\x28\x46\xee\x08\xde\x99\x27\x59\x27\xcd\xcc\xc3\x8c\xc5\xb2\x23\x93\x8e\x4e\x8f\xbe\xc4\x92\x8f\x2c\x0a\x6c\xdb\x5e\x6f\x2a\x7a\xc2\xc8\x01\x27\x33\x72\xa9\xe7\x5a\xb5\xf0\x7b\x99\xc4\x41\x28\xc6\x9a\xd6\xb8\x3f\xec\x03\x4e\x8e\x13\x0a\xde\x13\xdb\x7a\xad\xbf\x96\xef\x3b\x26\x90\x59\xc3\x1c\x37\xe9\x25\x8c\xb9\x61\xca\xa3\xc0\x68\x9b\x4a\xc3\x7f\x73\xac\x36\xd0\x1c\xfc\xdc\x37\x31\xf4\xd0\x66\xa0\xf5\x2b\xe8\x60\xb0\x41\xac\x87\x26\xde\x97\x76\x8e\xd6\x82\xf1\x5c\xa3\xf4\xcb\xc7\x16\x85\x9b\x35\xe3\xfc\xdf\xe9\x71\xbf\x1a\x62\xca\x64\x70\x58\xa5\x27\x17\x5d\x64\x42\x52\x0b\x44\x9f\x9d\xd6\x45\x4f\x5d\x8e\x5c\x11\xd6\x70\x0f\x56\xc4\x50\xd3\x85\xc9\xb5\xaf\x7b\xc1\x08\x55\xa8\x22\x71\x7d\xf8\x8b\xee\xe2\xcb\x21\xe4\x65\x05\xba\x4e\x8c\xc6\x93\x52\x38\xf5\x88\xa0\x8d\x91\xd6\x97\xe6\xb9\x5e\x0f\xb0\x4e\xcc\x5b\xf1\xca\x37\x60\xbf\x8b\x4e\x29\x95\x41\xe9\x89\x5b\x4b\x44\xe4\xdf\xd9\xff\x2b\x1e\x71\xad\xf0\x56\xab\x2a\x56\xd4\xb0\xa5\x5e\xe7\x93\xa7\x36\xe8\x9a\x52\x38\xf4\x48\x3c\xc2\xbb\xe9\x49\x17\x04\x3c\x18\x52\x6a\xef\xc8\x42\x2d\x2a\x17\x14\xae\xbb\x0d\xc1\xfa\xcc\x1a\xca\x9a\xea\x87\x57\xe2\x09\x62\x48\x0b\x13\x51\xb4\xcd\xa0\x8e\xcd\x17\xda\xc2\x74\xad\xe9\xa8\x7b\xb2\x30\x26\xa3\x78\xe1\x49\x29\xba\xd8\x8f\xbb\xab\x3e\xcb\xe5\x14\x1b\x3a\x3a\x57\x5b\xae\x80\xec\xa6\x9b\x47\x0a\x50\x2f\x28\x9a\xf1\x2e\x30\x68\x8c\x71\xe4\xcd\xa1\x2a\x37\x0d\x5a\xd5\xb2\xc8\x56\x2d\xcb\x4e\xb7\x9a\x43\xe4\x68\x8d\x94\x81\xb6\x6a\x32\x0f\x4b\x7a\xe2\xc1\x01\xaf\xeb\x32\x44\x71\x39\x50\x09\xd2\x98\xf9\xe4\x1d\x23\xb7\x1e\x6a\xc3\x96\x4c\x78\x5c\xe6\x5f\xf0\x0e\xfe\xdb\x9b\x84\x51\x94\xcc\xcc\x0f\x33\x52\x83\x06\x10\x4f\xca\x64\xb2\xe4\x88\xa5\x2f\x96\x63\xc3\x82\x2f\x5a\x3e\x
f7\x1d\xef\x16\x16\x14\x8e\x57\x55\x48\x61\x40\x74\x72\x8a\x8a\xb2\xbc\xc5\xcc\xaf\x40\x5c\xe6\x1e\xf5\x48\x01\xdf\x40\x2d\xde\xaa\xf2\xfa\x5e\x5d\x34\xc6\xa7\xc0\x5e\xe3\xbe\x77\x05\x0a\x86\x8a\x7b\x81\xd7\xff\x9b\xe9\xe0\x07\xbc\xfa\xc7\x0b\xdd\xbe\xeb\x2f\x5a\x29\xe2\x59\xd7\x18\x56\x5d\x69\xf2\xe3\x5a\x14\x5e\xde\x4b\x85\x54\xe1\x43\x45\x32\x6b\xf2\xf0\x56\xd2\xe1\x26\xca\x88\x58\x77\x29\xfa\xc2\x8c\x5c\x75\x9b\x78\xd3\xa5\x9b\xba\xde\x70\x70\xb7\x43\x97\x28\x64\xbd\xf3\x66\x7e\x52\xb4\x86\x5f\xf8\x50\x44\xe5\x37\xd1\x5f\xbe\xea\x7b\x78\x7a\x07\x53\x58\x7a\xfb\x41\x2d\xe0\x42\x61\xb8\x7a\x8a\x4c\xf5\x7f\x9a\x5c\x7c\xe0\xe4\xac\xfb\x3f\x45\x2b\x56\x3a\x5f\x26\x14\xb2\xdc\xc4\x97\x5d\xb0\xde\xbd\xb2\x4c\x34\x33\xbf\x78\xa7\xa9\x7a\x87\x49\x2c\x3a\xc7\xd2\x03\x5a\x14\x2b\xa6\x60\xa9\x90\x9d\x96\x85\x5a\x05\xe9\x2f\x37\xae\x94\x6b\x5d\xa5\xdf\x71\xe7\xe6\xbb\xec\xa4\x2c\xcb\x59\x6d\x5d\x70\x5c\x1d\x90\xe4\xf8\x5a\xe1\x25\xb3\x33\x45\x69\xae\x60\xb6\x60\xd9\xd8\xe5\x3c\x81\x77\x9e\xb9\x1d\xa1\x20\x75\xca\x1b\x10\x8a\x08\x72\x4d\x04\x5f\x77\x41\xc2\x83\x41\x41\x04\x17\x14\xf6\x0c\x7b\xb7\x69\x51\xd8\xd7\xcf\x5e\xe2\x2b\x10\xfa\xa8\x7f\x4d\xd4\x3e\xc1\xae\xa9\x66\xec\x58\xe0\x54\xff\x8e\xd9\xd4\xa2\xf0\xae\xeb\xfc\xbc\xb1\xcb\xbc\x06\xe0\x26\xb6\xd1\x79\x5b\x0b\xf8\xa4\x4b\xf5\x5b\x6b\x01\x87\x79\xed\x22\x3e\x30\x96\x1c\xe5\xbf\x16\x70\x52\xd4\x28\x92\x22\x61\x8d\xfc\xd7\x02\x6e\x8b\x1a\x26\x5d\x0b\x96\xeb\xe7\x05\xbc\xea\xb6\xc7\x21\x6b\x04\xfd\x6e\x1d\xec\xdf\x75\x6b\x20\xff\xa9\x9b\x13\x49\xc3\x0e\x18\x13\xc5\x65\xcb\xc4\x93\x6e\x2d\x9d\xe7\x6d\xb7\x96\x6d\xe2\x53\xb7\x0e\xef\x87\xdd\xc5\x42\x5f\x43\x8d\xac\x4e\xe9\xad\xba\xa0\xf0\xf6\x4e\x6e\x7b\x29\x2a\xc3\xc6\x8c\xbc\x32\x08\x78\xd5\xb9\xb8\x12\x0d\xe1\x96\x11\xd1\xbf\xc1\x40\x34\xc6\x5b\xa6\xe6\x3b\xa3\x01\xe3\x0d\x12\xfa\x77\x5d\x38\xe9\xc2\x6d\x17\x0e\xbb\xa0\x56\x3c\x0f\xbc\x37\x11\xc9\x98\xcb\x4b\x9e\xa5\xfd\x30\x79\xe4\x27\x5e\xaa\x37\x3f\x8c\x2f\xf4\xc3\x98\xc5\xec\x82\x8b\x47\x7a\x6b\x76\x79\x34\xb1\x16\x
df\x29\x1c\xac\xc7\xe1\x4b\x96\xf7\x46\x96\x60\xbe\x8f\x4e\x6d\xd6\x36\xba\x93\xe5\xbe\x2b\x26\x46\x8f\x58\xbd\xa5\x85\x0f\x1e\x89\xfb\xde\x69\x1d\x6f\x1b\xc3\xa7\xce\xa4\x22\x56\x1c\x78\x55\x9b\xdf\x0f\x5d\x27\x14\xc4\x12\x18\x7e\xe4\x4b\xb7\x3d\xf1\xfa\x67\x0f\x2c\x5c\x4e\xd5\x07\x46\x76\x82\x0f\x5d\x62\xa5\x72\x1e\xf1\xf4\x92\x73\x59\x58\x57\x45\x09\xf3\xd1\xb2\x4a\x10\x0f\x63\x3a\x17\x06\x71\x5c\x88\x44\x98\xa2\x38\x23\xd6\x1b\x16\x46\xdc\x57\x74\x58\xb5\xe9\xbc\x3c\x3a\xea\x04\x22\x19\xeb\xf4\x45\xd4\xf8\x37\xea\x18\xa3\x87\x31\xf9\xe9\xbd\xb2\xaf\xc0\x3b\xb0\xcf\x19\x78\x87\x76\x23\x63\xd9\x1d\x29\x32\xa2\x89\x2d\x5b\xd8\xea\xc7\x36\x30\x7b\x4e\xbe\x46\x60\xfd\x97\x05\x24\xd6\x59\xd3\xd8\x13\xd0\xef\x46\x96\xe2\x27\x8e\xd1\xe1\xef\x7d\xa4\xd8\x8a\x0c\x76\x15\x69\x98\x8f\xca\x90\x8b\x76\x19\x87\x31\xee\xbb\x9e\xe2\x2f\xfb\x2c\xc3\xf8\xf4\xe0\x6d\xdb\x5d\x49\x7e\x08\x0a\xde\x33\x7b\x3f\x02\xdf\x6d\x18\x99\xe2\x43\x0e\xc9\x9f\xef\xf5\xd2\x23\x07\xb0\x53\x01\x19\x6b\xb1\x58\xd0\xe7\x2c\x71\x7e\x7e\x60\x61\x6c\xff\x0c\xe3\x50\xda\x3f\x04\x39\x0c\x29\x19\xa8\x8f\xc4\xfd\xd7\xd1\x78\x94\xf7\xdb\x31\xe6\x53\x41\x22\x08\xf2\xe8\x9d\x30\xee\x48\x8a\x7f\xc4\x08\x43\x3c\x59\x8e\xc3\x47\x13\xf2\x84\xda\x31\x11\x7f\xf2\xef\x20\xff\xe4\xdf\xa9\xad\x1e\x1d\xf5\xb8\x20\xd8\x25\xb0\x84\xda\xf8\xe4\xb0\x64\x41\x14\x1b\x44\x9f\xff\xbf\x01\x00\x00\xff\xff\x01\x30\xcf\xe3\xfa\xaf\x01\x00"), + compressedContent: 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff\xdc\xbd\x0b\x7b\xdb\xb8\xae\x28\xfa\x57\x1c\xed\x6c\x2f\x72\x0c\xab\x76\x5e\x6d\xe5\x70\xf9\xa6\xef\xf7\x23\x49\x3b\x99\x66\x72\x72\x28\x99\x76\xd4\xd8\x94\x4b\x51\x71\xd2\xd8\xfb\x6f\xdc\x1f\x74\xff\xd8\xfd\x08\xea\xed\xc7\x74\xf6\x5e\xfb\x9c\x73\xef\x5a\xf3\xa5\xb2\x04\x82\x20\x09\x82\x00\x08\x82\x5b\xc3\x44\x06\x3a\x8c\x24\x91\xf4\xde\x49\x62\xd1\x88\xb5\x0a\x03\xed\xf4\xb2\x0f\x0d\x45\x24\x28\xd0\xf4\x5e\x09\x9d\x28\xd9\xd0\x2e\x67\x12\xb4\x3b\x64\x0a\xf4\x22\x07\x1b\x93\x02\x44\x91\x1d\xd0\x90\xa3\x56\xf9\x87\x72\x6d\x19\x3a\xa2\x40\xd2\xc5\x82\x16\xa8\x06\x44\x94\x50\xed\x82\x28\x50\xe9\x65\x54\x1b\xb1\x0b\xa2\xc1\xe2\x2f\x57\xa0\x49\x52\xaa\x60\x0f\x92\xa2\x02\xb1\x8c\xed\xef\xd6\x99\x10\x01\x79\xad\xe5\x6a\x03\xc2\x4b\xd5\xee\x03\x2f\xaa\x4d\x96\x11\xfe\x0b\x28\xe1\x24\x81\x32\x2d\x65\x62\x04\x09\x4a\xc4\x1c\x40\x50\x10\xc3\x97\x71\xfe\xf7\xd0\x17\x10\x0e\x35\x0a\xcb\x24\x26\x24\x2a\x91\xf8\x10\xa2\x82\xc4\x60\x19\xed\xff\x32\xaa\x23\x12\xc0\x32\xdd\x65\xc2\x39\x09\x4b\x84\x3f\x82\xb0\x20\x3c\x5a\xc6\xfc\xbf\xb3\x2d\x21\x89\x60\x65\x6b\xca\xcd\x89\xc8\xb0\xd4\x9c\xc7\x30\x2c\x9a\x13\x2e\x23\xff\x3f\xac\x85\x43\x12\xc2\xba\x36\x96\x1b\x39\xab\x89\xb9\x1d\xc6\x98\x74\x79\x5f\xba\x43\x62\xde\x7b\xa6\x1e\xa2\x4b\x25\xee\x6c\x09\x28\x88\xdc\xad\x96\x01\x91\x97\x22\xa2\x54\xf0\x36\x2b\x08\x45\x93\xf7\xea\x45\x21\x29\x17\x26\x49\xa9\xfc\x4d\x51\x1e\x8a\x7e\xdc\x5f\xc6\x00\xbc\x86\x83\xf0\x12\x9a\x61\x19\x0d\x14\x63\x74\xb0\x0a\x11\x04\xcb\xa8\x48\x50\xc2\xe6\x57\xb1\x41\xc1\x06\x0f\x57\xe3\x83\x68\x25\x46\x12\x95\x90\xc6\x75\xa4\x50\x30\xdc\xa3\x75\x68\x21\x5c\x87\x98\x84\x74\x71\xc3\x55\x23\x64\x03\x52\xb0\x8a\x1d\xf6\x61\xa4\x88\xf9\x26\xd8\x91\x52\xfc\x8e\x48\x0a\x09\xeb\xf4\x92\x43\xd9\x4b\x5a\x2d\x2a\xce\x93\x0b\xa6\x89\x6a\x25\xb4\x97\xad\x2d\x0b\x0a\xdb\x6c\x5c\xc1\x54\xe0\xd1\x05\x1e\xc1\x3a\x3d\x71\x28\x9b\x4d\xe5\xfa\x3d\xd1\x6a\x51\x7d\x2e\x2e\x98\x72\x39\x28\x66\x5e\xe5\x0b\xeb\x58\xc8\x91\xbe\x62\x02\x2e
\xcd\xb2\x45\x17\x14\x26\x8c\xd4\x2b\xc8\x26\xe2\xb9\xbc\x58\x50\xd8\xd4\x90\x0c\x21\x24\x29\x2d\x82\x02\x67\x9d\x1e\x3f\x14\x3d\xde\x6a\xd1\xe4\x9c\x5f\x30\x7d\xce\x2f\x32\x0a\x92\x73\x79\xc1\x14\x24\x0b\x0a\xeb\x9b\xa5\x32\xac\x59\x4f\xe9\x56\x37\xeb\x2b\x5d\xf4\x95\x3a\x4f\x72\xbc\xe2\x5c\x5f\x30\x09\xe2\xd7\xe9\x35\xc8\x04\x22\x53\xcc\xcc\x4c\x7d\x9e\x5c\x80\xca\xbb\x5e\xfd\x22\xa6\x76\xb7\xd7\x39\x64\xa2\x27\xda\xed\x1c\x91\xa8\x21\xa2\xbd\xbf\xd3\xd6\x15\x2d\x95\xc4\xb4\xb5\xc2\x16\xff\xe5\x51\x41\x69\xd4\xe2\x60\x46\x27\xc7\x9c\xac\xc4\x9c\x73\x4f\x3c\x0e\x03\x81\x2d\xf8\x0b\x0a\x54\x41\x81\x6c\x0b\xe0\x19\x1d\x2d\x92\xe4\xd4\x1d\x26\xfd\xec\xd1\x4b\x28\x85\x80\x75\x7a\xc1\xa1\xe8\x05\xad\x16\xe5\xe7\x81\x19\xdf\xe0\xa2\x67\x70\xda\x2f\x49\xf6\xa5\x25\x0c\x4f\x05\xf9\xd8\xf3\x15\xcc\x54\x1a\xc5\x35\x9f\x82\x48\xc6\xd1\x58\xb8\xe3\x68\x44\x64\xcb\xf1\x1a\x4e\xeb\x8a\x50\x0a\x6a\x41\x0b\xcd\xf4\x8a\x64\xf0\xce\x61\x28\xb5\x50\x92\x8f\xe3\x7f\x3a\x85\xf8\x98\x9a\x75\x40\x5f\xa9\x68\xd6\x78\xae\x54\xa4\x88\x73\xa5\xf5\x34\xf6\x1e\x3c\x18\x85\xfa\x2a\xf1\xdd\x20\x9a\x3c\x10\xe3\xc9\x83\x20\x52\xe2\x81\x3f\x8e\xfc\x07\x5d\xb7\xe3\x76\x1e\x5c\x85\x52\xc7\x0f\x9c\x96\x6c\x39\xee\x64\xe0\x94\x24\xd2\xa8\xc6\x22\x20\xd8\xf9\x05\x24\xec\x1a\xbb\xb9\x03\x82\xf6\x92\x66\x93\x68\x26\xdc\x69\x34\x25\x94\xf6\xcc\x37\xed\x72\xd0\xae\x8f\xdf\x4b\xc3\x99\x63\xbd\x2e\x96\x92\x70\x48\xba\x9d\xce\xa1\xa6\x19\x3b\xb9\xd3\x24\xbe\x22\x97\x58\x2f\x85\xad\x4e\x2f\x1c\x12\xc9\x18\x53\x29\x84\x7d\xe3\x44\xfe\x77\x11\x68\x67\x8b\xe9\xbb\xa9\x88\x86\x0d\x39\x9f\xcb\x64\x3c\x36\x42\x32\x7f\xca\x8a\x38\x59\xc5\x0e\xcb\xc1\x9b\xcd\x29\xd9\xa7\xb0\xd5\xed\x65\x6d\x4b\x1a\xa1\x6c\x48\x77\xfb\xb0\xd3\x6c\x12\xc9\x7c\x6d\x64\x99\x32\xff\x1a\x42\x24\x0d\x87\x64\xeb\x9a\x48\x9c\x99\xe6\x8f\x6e\x75\x4d\xf3\x52\xaa\xba\xbd\x8c\x3c\x14\xb9\x47\x6c\x4c\x46\x14\x4e\xd8\xea\x01\xdf\x1a\xa5\x8c\x5b\x8c\xee\x69\xc6\xb8\x2b\x5b\x97\x75\x0f\xf6\x44\xbf\xe3\xc9\x43\xd5\x6f\x77\xbd\xae\xe9\x8b\x2d\xe9\x6e\xa7
\xdf\x89\x66\xa7\x44\x1a\x49\xeb\x72\x4a\xe7\xf3\xf4\xb7\x0f\xca\xf5\x29\xed\x6b\xcf\xfc\x0a\x40\xb9\x01\xc5\x76\xf7\xa4\xeb\xa3\x9c\x6e\x36\xb7\xaa\x65\x7b\x92\x61\x39\x23\xb1\xf3\x21\xd4\xf3\xb9\xc1\xd6\xef\x7a\xca\xf5\x4d\xfd\x1d\xbb\xc0\x3c\x5f\xd3\x4c\xdb\x28\x7a\xd8\x59\x50\xf8\xb6\x56\xb4\xa7\x40\xdd\xf5\xd3\xa3\x73\x78\x9a\x4d\xf4\xb5\x10\x2c\x03\xa1\xf0\x7d\x99\x9c\xf4\x6b\x2e\x07\x0f\x3b\xfd\x1b\xed\xa9\xfe\x40\x7b\x63\xbd\xa0\xf0\x94\x75\x8a\xa1\xb8\x2c\xa3\xbe\xe7\x9e\x04\xdf\x53\x8b\x82\x7d\xdf\x54\x65\x51\x06\x01\x81\xa7\x4b\x50\xef\x2c\x96\x6c\xda\xdc\x2f\xb0\xc7\x35\x72\x19\xb5\x4b\xc4\xb9\xbe\x28\x5e\x2a\xfb\x52\x99\x97\xb9\x6c\x35\xa5\x3f\xb0\x31\x79\x55\xe2\x94\x57\x16\xb1\xe1\x13\x63\xbd\xca\x51\x89\xad\x73\x3e\x69\xa9\x94\x35\xfc\xec\x95\xea\x59\x21\xff\xda\x0e\x72\x3a\xc2\xbd\x92\xa4\x36\xdc\x60\xdf\x52\xc1\x84\xeb\x17\xa0\xd9\xf8\x23\x3d\x67\xec\x7e\xdb\xeb\x2c\x0a\x82\x5e\x57\xfa\x6b\xdb\xeb\x42\xde\x67\x06\xfe\x23\x1b\x93\xd7\x25\xfa\x8f\x8d\x98\xca\xaa\x55\xec\x0c\x34\x93\xa9\xec\xed\xe9\x76\xbb\x47\x95\xa9\xf8\x5c\x57\x17\xae\xbc\xf8\xd7\x6a\xf1\xf3\x8b\x12\xd9\xca\x4a\x0f\xe9\xf2\x52\x49\x03\xf7\x73\xa3\x0a\x74\x7e\xd1\xc3\x39\xa0\x8d\x76\x62\x38\x1e\x34\xd3\xa6\x17\x2c\x3a\x5c\x9d\x50\xa4\xf1\x42\x9c\x1d\x1b\x1d\x97\xc2\x33\x46\x74\x0d\xb3\x11\x6b\xb9\x50\x29\xe3\x6e\x36\x45\xb5\x02\xc0\x6e\xa6\x89\xad\xe6\xae\xa8\x06\x44\xb5\xaa\xc4\x54\x15\x2c\xd5\x63\x14\xea\xac\x26\xbe\x54\x53\xb3\x99\xac\xaa\x0e\x12\x96\xb8\x3e\xe5\xb6\xd2\xdb\x6a\xa5\x90\x54\x2b\xe6\xa6\x62\xb1\xa2\x62\xa3\x89\x67\x55\x07\xab\xab\x6e\x36\xf9\xfa\xfa\x81\x33\xee\xfa\x34\xb0\x54\xdc\x2c\x53\x01\xbc\x4a\x49\x50\x9b\xfd\x1a\x0a\xa3\xe7\x98\x18\xa6\x70\xe3\x48\xe9\x75\x22\x06\xa5\x39\x8a\xf2\x05\xfe\x07\x2f\x2b\xf2\xe8\x17\xb1\xb1\x99\x81\x2c\xf3\x25\x63\x6c\xac\xfb\x1d\xcf\x3c\xdc\x68\x94\xc9\x58\xc1\x3a\x41\x25\x5b\x46\x07\x83\x17\x6b\xa5\xa1\x6c\x6f\xd0\x13\xe4\x6f\x58\xfa\xfd\xfa\xd2\x0f\x36\x95\x7e\xa0\xe6\x1d\xfc\xfc\x9e\xeb\x2b\x77\x1a\xcd\xd6\xeb\x2a\xff\x2e\x57
\xe1\xf9\x77\x26\xb3\x96\x77\xcc\x52\xdb\x9f\x92\x6e\x97\x7a\x9d\x43\xd5\x6c\xca\xc3\xce\x7c\xae\xcc\xea\xd9\x39\x94\x7d\xd5\x92\x5e\xaa\x6d\x62\x65\x5c\x73\xb9\x43\x51\x04\x7d\x66\xf8\x26\x10\xe1\x18\xbe\xd8\xe7\xe1\x38\x8a\x14\x3c\xb1\x3f\x54\x94\xc8\x01\xfc\xb0\x3f\xc6\xd1\xa8\xb7\xae\x39\xcd\xe6\xa6\xc6\xce\xe7\x9b\xbe\x6e\x31\x66\x94\x2b\x43\xcf\x27\xb6\x69\xac\x7a\x7f\xeb\xe3\xaf\xe9\xd2\x76\x55\xe0\x4c\xb9\xc1\x15\x57\x4f\xa3\x81\x38\xd2\x24\xa1\x3d\x7e\xb8\xbf\xbf\xf3\xf8\x60\x3e\xdf\x3f\xd8\xed\x3e\x3e\xe4\x7d\x52\xd6\xb8\xc1\xa8\xe0\x5e\xf9\x55\x4b\x9d\x27\xad\x2e\x7e\x61\x3b\x74\x91\xab\x50\xdf\xa3\x50\x12\xc7\xa1\x1b\x0d\x9b\xf3\x0b\xa8\xe8\xc6\xd6\x06\xc9\x49\x33\x2a\x4e\xb0\x44\x61\xd2\x6a\x41\x50\xa5\x32\x98\xcf\x09\x6f\xd9\x02\x86\x42\x90\x84\x53\x23\x0a\x70\x66\xf3\x9c\x2c\x5d\x22\xab\xf7\xb7\xec\xa2\x8c\x26\x6d\x69\xd2\xbf\x4c\x93\xce\x69\xb2\xd6\x90\x59\xcb\x16\x85\x36\x8e\xa3\xff\x96\xfd\x0a\x2d\xc6\xa4\x4a\xe9\x48\x18\x1a\x55\xbc\x4a\x87\xa0\x3d\x5b\x47\xc2\xf8\x3f\xd9\xfe\xc1\xee\x4e\xa7\xd9\xdc\x7f\xb8\xbb\xb7\xfb\x4f\xc6\xfb\xfa\xbc\xdd\x16\x17\xad\xc4\x4b\xaa\x14\xc0\xef\xeb\x78\x4f\xb9\xf1\x74\x1c\x1a\xa9\xb5\xa0\xf0\xc7\x7a\x28\xec\x53\x04\x92\x92\xfd\xa2\xb5\xf4\x0b\x5c\x6b\x17\xe2\xfb\xd4\x80\x32\x6b\x71\x52\xe5\x05\x4d\x51\x31\x27\x82\x25\xf5\xe6\x26\x7d\x75\xde\x6e\xeb\x8b\x96\xf0\x0a\xfd\xb8\xb3\xc8\x14\xe5\xb4\xdf\x95\x64\xff\x32\x32\xb6\x7e\x9d\x8e\xee\x22\x27\x88\x82\x96\xeb\xf4\xf3\xad\xff\x50\x6e\x28\x07\xe2\xf6\xe3\xd0\xf6\xae\x58\x07\x6a\x04\x61\x0d\x36\x59\x0b\x9b\xe9\x39\x87\x79\x0b\x8d\xf6\x3d\xe6\xb1\x7e\x9d\x23\x60\xf9\xb7\x76\x06\xbe\xa0\xc0\x97\x71\xda\xae\xca\x55\xa7\x70\x48\xf4\x61\x37\xd3\xf6\xce\x4a\x8a\x5d\x07\x50\x0d\x69\x77\x0f\x89\x28\xd3\x8a\xd6\x59\xa6\x7d\x08\x0a\xa2\xc5\x74\x55\xe7\xc0\x91\x0a\xd6\x35\xc7\xa8\x7a\x1d\x18\x64\xba\xde\x3a\x51\x69\xc0\xba\x20\x0a\xb0\xc2\xd3\xba\x04\xb7\x0b\x43\x4f\xc2\x28\x53\x1d\x87\x9b\xaa\xde\x43\xa5\xfb\xca\x93\x8b\x94\x50\x7f\x6d\xbf\xe3\xcf\x73\x75\x61\x46\x27\xde\x30\x4f
\x52\x38\xd0\x08\x39\x96\xab\x95\xbb\x3a\x30\x88\x8b\xf5\xfa\xd9\x32\x30\x24\x17\x9b\xb4\xaa\x55\x05\x80\x63\x91\x64\x65\x91\x92\x2f\xb3\x56\x08\x02\x2c\xc6\xd7\x14\x2b\x39\x2d\x97\x0a\x42\x84\x45\xa3\xb5\x45\x4b\xae\xc9\x15\x85\x21\xbc\x58\xb5\xfa\x68\x75\x97\x15\xba\x31\xaf\xde\x9c\x7c\xfc\xe0\x4e\xb9\x8a\x05\x6a\x66\x01\xd7\xc1\x55\xc9\x99\x3d\xd1\x64\x46\xae\x34\x38\xa7\x57\x61\xdc\x08\xe3\x86\x8c\x74\xe3\x86\x8f\xc3\x41\xc3\x94\xdc\x6a\x38\x2d\xe9\x4e\x44\x1c\xf3\x91\x00\x83\xc0\x28\x47\x03\xc3\x05\x37\xb2\xc4\x66\x37\x69\xed\xf1\x2c\x44\xfc\xee\x36\xbd\x0f\x78\x2c\x1a\xbb\x5e\xea\x20\xf0\xa3\x68\x2c\x78\xc9\x3f\xa0\xfa\x33\xa3\x2c\x7a\x57\x92\x38\xbc\xf1\xe4\xe3\xc7\x77\x8e\xd1\xfa\xb0\xd4\x4e\x56\x4a\x26\x13\x5f\xa8\xc2\x4a\x57\x7d\x04\x97\x8d\xd7\x1f\x4e\x0d\xb8\x47\xd4\x21\x6b\xef\x74\xf7\x1e\xee\x3d\xda\x3d\xd8\x7b\x38\x9f\x17\xcf\x87\x4c\xcd\xe7\xa4\x33\x57\xd4\x68\x22\xb4\xd9\x24\x5b\x61\xfc\x22\x94\xa1\x36\x5d\x31\x9f\xab\x7f\xef\xd2\x3a\x3a\x24\xc9\xd2\xb0\x57\xa3\x61\x0d\xe1\x2f\xde\x7d\x3c\x3a\x2d\x28\x3f\xc8\x4a\xd5\xed\xc6\xac\x94\x6a\x84\x32\xd6\x5c\x06\xe6\xe5\x09\x02\xe1\x97\x96\xe3\x64\x28\x4f\x4e\x8f\x5f\x7f\x78\x59\xe0\x7c\xec\x65\xb2\x2d\x75\xba\x98\x02\xd2\x0d\x2c\xbc\x79\x59\xc0\xee\x67\xb0\xa5\x96\x3c\xcc\xde\xa1\x96\xe4\x86\xb1\xd5\x96\x14\xed\x6f\x4b\xeb\xba\x80\xe3\xac\xee\x77\xaf\x4f\x4a\xad\x79\xf4\xd7\x25\x27\x32\x2d\x2a\x1b\x47\xc7\xc7\x47\x7f\x14\x85\xbb\x1d\x2f\x93\x9f\x83\x95\x0e\x25\x55\xb8\x91\xe6\xf3\xad\xcc\x44\xcf\xc4\x6b\x8a\xf4\xe3\x93\x37\xcf\x9f\x9e\x36\x66\xa1\xbe\x6a\xf0\xc6\x30\x14\xe3\x41\x43\xf2\x89\x18\x34\xfe\xa7\xd3\xd2\x2d\xe7\x7f\x62\x85\x56\x0a\xdf\xa4\x44\x9d\xeb\xc2\xc5\x19\x0a\x22\x68\x5f\x78\xc8\xe8\x53\x6d\x66\x10\x5a\x3d\x96\xc4\xae\x67\xc8\x13\xb8\xc2\xd5\xdb\x58\x23\xa4\x68\x5d\x38\x24\x2a\x5f\x65\x74\x05\xac\xf1\xee\xe3\x87\x97\xcf\x8f\x1b\x1c\x71\x35\x3e\x08\x31\x68\xe0\x62\xd0\x40\x62\x1b\x7e\xa2\x1b\x91\x1c\xdf\x35\x62\x21\x1a\x4e\x2b\x43\xd3\x72\x1a\x42\x6a\x15\x8a\x18\x2b\xf8\x85\x96\x8c\xea\x2d
\xd9\xf1\xfe\xb2\x8b\xff\xa2\x81\xb6\xa7\xf3\xee\x4c\x80\x33\xbb\xc4\x25\x76\x60\xb0\xd9\x57\x3c\xfe\x38\x93\x9f\x54\x34\x15\x4a\xdf\x91\x84\xd2\xfb\x12\xb5\xc9\x85\x55\x16\x90\x54\x5a\x16\x31\x53\x0d\x49\x4a\x2f\x67\xaf\xc9\x25\xb1\xbf\xa0\x50\x5e\x67\x9a\xbc\xd6\xa4\x68\xd0\xae\x57\x18\xbf\xd2\x1d\x42\xc4\xa4\x3b\x82\x90\x75\x7a\xe1\x61\x94\xaf\xc8\xad\x56\x4a\x40\x74\x1e\x5e\xa4\x83\x53\xad\x5e\xf4\x02\x16\x10\x53\x59\xa9\xa6\x20\xab\x65\xcf\x2b\xc8\xaf\xf5\x34\xbe\xbe\xc2\x92\x46\x34\x88\xb4\xc4\x7e\x4e\xd7\x90\x9d\x81\x6f\xa8\xea\xf9\xae\xdf\xf3\x99\xef\xfa\x29\x31\xbe\xf5\xe9\x84\x43\x52\x23\x65\xc8\x5e\x1b\x84\x30\xcc\x89\x99\x68\x72\x87\x2d\x1f\xd2\xbc\xe9\x5e\x4d\x38\x5b\x17\x51\xfa\xb5\x53\x9a\xe1\xd2\xb4\xaa\xf0\xdb\x6c\x6f\x76\xcd\xaf\xdc\x1c\xb8\xb7\x3d\x6c\x05\x38\xee\x0d\xa4\x3d\x18\x54\x07\x70\xa4\xcd\xb2\x83\x03\x88\xfb\x09\x81\xcb\x4b\xfd\x69\xac\x84\x92\x4f\x7b\x52\xde\x42\x9d\x91\x48\x40\x4e\xc6\x8a\x0d\x55\xbb\xf1\x54\x94\xbe\xaa\x6a\x16\xf9\x12\xf5\xfc\x76\x2a\x02\x1d\xca\x91\x59\x94\x70\x31\x2a\xfc\xf2\x32\xf7\xd8\x2d\x7b\xb2\xa5\xbb\x6d\x56\x80\xdc\x85\xbb\xd5\xed\x2d\xad\x53\x1d\xaf\xda\xf5\xd2\xe5\x06\x8f\xcb\x7b\xe9\x32\x96\xae\x4b\xe9\xd2\x90\xca\xfa\x8a\xc8\xdd\xea\xd4\xc5\xb5\x1b\x20\x8e\x20\x13\xc3\xa9\x4c\xcd\x26\x6c\x0a\x36\x95\xb9\xff\x38\x17\x9c\x39\x86\x01\x62\x18\x34\x9b\xcb\x50\x25\x5a\x05\x42\x89\x55\x50\xbb\x05\xd4\x10\xa1\x86\xcd\xe6\xc8\x40\x8d\x40\xb9\xa3\x62\x1a\xe4\x50\x57\x08\x75\xb5\x0a\x57\xbe\xb8\x94\x10\x94\xd8\x6f\xb4\x5e\x6b\xde\x2a\x54\xee\x62\x10\x4a\xea\x73\x4f\x1c\x6a\xdc\xc8\x34\xcc\x67\x2a\xc6\x4d\xb5\x73\x71\xb1\xce\xfb\x3f\x5b\xab\x88\xa2\xc2\x63\x57\xdf\x70\x78\x47\x14\x18\x01\x08\x92\xb6\x1c\xa7\xac\x18\xdf\x95\x39\x50\x22\xce\xdb\x4d\xe6\x9d\xdd\xc8\xd4\xa9\x1e\x7c\x2d\x99\x41\x5b\xa0\x3b\x2a\xa1\xbb\xdf\xf6\x3a\xc0\x8d\xd2\x9c\x7f\x3e\xa9\x7e\xee\xd6\x3e\x9f\x56\x3f\xef\x80\xef\x49\x08\x3c\x53\x85\xd5\xd2\x9f\x6f\xd0\xd2\x77\x11\x7a\x80\x8a\x3f\x7c\xdb\x00\xb8\x57\x02\xc4\x56\x7c\x97\x65\x9f\xfc\x53\x24
\x42\xa2\xeb\x19\x84\xf7\x5d\xb6\x5a\xa9\xa9\x80\x3d\x78\xe5\x9d\x5f\x2c\x32\x09\x79\x66\x60\x41\x16\x2d\xb8\x2c\xcf\xf8\x53\x49\xca\xd3\x5c\x92\x23\x49\x9e\x1a\x00\x4a\xcb\xf3\xfc\x4d\x4a\xa0\x74\xaf\xac\x81\xa4\x28\x20\x62\x6c\xf2\xbb\x4a\x4b\xca\x63\x51\x43\xff\xc6\x7e\x05\x5b\x0d\x56\x61\x9b\xf7\x41\xb2\xad\x2e\xbc\x92\xc6\x32\xcb\x2b\xc5\x0a\x8c\x9c\x78\x25\x53\x4f\x36\x85\xad\x0f\xa9\xbf\xdb\x94\xe8\xf4\x24\x7b\x25\xdd\xf8\x2a\x1c\x6a\x42\x7b\x74\xab\x1c\x15\x82\x3b\x3a\xca\x1d\xa6\x16\xb3\x34\xf3\xc9\xdd\x36\x2c\xde\xb1\xfb\x62\x5d\xf3\x4f\x0e\x37\x32\xb6\xe7\x08\xc5\x8f\xec\x51\xe5\x8e\x98\xf9\x19\xa2\x88\x35\x93\xc7\x36\xc8\x20\xc4\x0f\x3e\x31\xe8\xcc\x72\x93\x41\x2e\xc4\x38\x16\x86\x5a\x0c\x1e\xc9\xf7\x19\xdc\xa1\x1b\x60\xd5\x7e\xa5\x27\x0c\x1e\x69\xba\x10\x5d\x10\xe1\x90\xec\x5b\x6a\x52\xf2\x94\x7b\x55\x9d\x83\x69\xc5\x43\xac\xf8\x2a\x6b\x32\xc5\x4a\x1b\x86\x06\xc3\x5d\xe8\x84\xec\x78\x5d\x63\x0a\x1a\x50\x08\x3d\xe5\x8e\x16\x90\x95\x1d\x2c\x16\x0b\x22\x69\x0f\x7b\x7b\xb1\xd8\x60\xcd\xbd\x36\x03\x25\x40\xba\xc1\x33\xf3\xe7\xb1\xf9\xb3\x57\x2c\x08\xcb\x31\x37\xf4\x7e\xb1\xa8\xec\xe0\xbd\xae\x19\x72\x76\xed\x9a\x91\x81\x04\x09\xaa\xaf\xdc\xe1\x98\x8f\x62\xef\x26\x0a\x07\x8d\x0e\xed\xe1\x2a\x36\x9f\x4f\x49\xea\x16\x8d\xd8\xfd\x02\x42\x46\x02\xa6\x09\x2e\x65\x66\x25\x66\x9c\xf8\x10\x9a\x45\x71\x85\xed\x0f\x02\xa5\x94\x30\x1a\xd0\x47\x99\x79\xa7\x3e\x1a\xf1\xd4\x4b\x5c\xde\x6c\x12\xa2\x99\x9e\xcf\xef\x17\xf4\x5c\x5c\xb0\xc4\xe5\x04\xcd\x24\x30\x10\x2b\x10\x0a\x76\x3f\x42\x8b\xda\x92\xb8\x80\x84\x49\x37\x00\x6e\x74\x64\x30\x7a\x8e\x40\x3d\x67\x98\x6f\x4f\xb9\x57\xec\xa9\x24\x33\xf2\x5c\xe6\x1d\xd5\x28\x87\x2c\xe1\x17\x09\xf7\xdb\xde\x3e\xf8\x5e\x99\x19\xec\xde\x8d\x74\x79\xc5\x9d\xec\x6e\xf7\xef\x08\x07\x81\xc2\xcd\x0b\x9a\xcd\xa8\x7f\x8b\x31\x7d\xca\x0d\x41\xb9\xdf\xcd\xdb\x3b\x7c\x11\xf4\x95\x6b\x86\xda\xbc\x32\xc3\x00\xd2\xf5\x29\x5d\x90\xb2\x7f\x4d\x2f\x48\x04\x7e\x69\x80\x7c\xdb\x54\x33\x26\x02\xa4\xe9\xd6\x21\x09\x8d\xae\x00\x8a\xc2\x4b\x49\x22\x08\x5c\x1f
\x12\x12\xd2\x1c\x47\xf5\x2d\xf0\xfe\xfd\x34\x52\x3a\xf6\xf8\xc2\xbb\x4f\x77\xb7\x24\xbb\x5f\xe0\x00\x1e\xff\xaa\x4c\x50\xee\x88\xd4\x45\xc2\x9a\xf5\x62\x46\xde\x49\x90\xee\x15\xa4\x62\x5b\x55\x59\xee\xeb\xe6\x68\x30\x14\xe6\xd7\x9e\x82\xb1\x11\xe8\x85\x6c\xfb\x59\x97\xe8\x13\xf3\xdd\xb4\xe1\xd9\x46\x51\x2e\x3d\x09\x51\xcd\x87\xf3\x32\x5f\x8c\x90\x85\x20\xc9\x76\x3a\x91\x2d\x5f\x48\xb2\xd5\x01\x05\x09\x2e\x74\x14\xcc\xef\x2e\xe8\xfc\xb7\xa4\x6f\xd2\xf5\xf4\x7e\xdb\x73\x86\xb7\x0e\x70\x2f\x39\x17\x17\xf3\xf9\x7d\xe8\x9d\xc1\x77\xef\xac\x12\xb5\xf6\xa2\x34\x6f\x53\x2d\x49\xe5\x5a\x52\xd7\xb3\x13\x40\xb9\xd7\xc0\x19\xe1\x2c\x81\x88\x09\x98\x11\xd9\xff\x28\xcf\xf9\x85\x2b\x3c\xfb\xef\xb0\xa2\xe7\x15\x5b\x89\x51\x4f\xe1\x76\xd5\x0f\x6a\x44\xe7\xd4\xc8\x8d\x7c\x09\x36\x3a\x62\xb1\x1d\x65\x26\x08\xd1\xe7\xc9\x85\xa9\x86\x43\xc2\x48\x82\xce\x66\x5a\xa2\x1b\x64\x3f\x71\x43\xf6\x9a\x70\x48\xdc\x90\x7a\x89\xfb\x3d\xfd\xf1\x9d\x42\x42\x73\x67\x42\x61\x48\x28\x77\xd2\x0b\x5c\xdf\x98\x04\xae\x4f\xb1\xad\x86\x39\x4d\x6b\xd3\x8a\x7b\x15\xb7\x05\x92\x91\xf6\x89\x1b\x81\x86\xfb\xa9\xa7\x5c\x09\x3f\x3c\xb1\xb0\xcb\x14\x87\xa8\xe8\xbc\xf7\xd8\xdc\x8f\xf2\x5c\x5e\x34\x9b\x53\xb2\x5b\xea\xd7\xcf\x55\xae\x43\x48\x40\x48\x76\x2f\xbc\x2f\x12\x94\xa7\x80\x7b\x4f\xe4\x02\xbe\xe6\x6b\xe0\x97\xb5\x5a\x4e\x25\x6c\xe5\x49\x3e\xe1\x35\x04\xec\xfc\x02\x22\x86\x98\x5d\x65\xa4\x9d\x66\x1d\xa8\x4d\x0f\x3b\x18\xb1\xd0\xa7\xe1\x44\x44\x49\x49\x66\x67\xab\x35\xa5\x0b\xd0\xf9\x60\x94\x3e\x07\x63\xc1\x55\x56\x4c\xa1\x3f\x28\x83\xb2\x75\xfa\x2c\xb4\xed\x72\x83\x35\x7e\xff\x9e\xca\x36\x2d\x69\xa1\x02\x06\x90\xb0\x88\x28\xb4\x09\xad\x79\x92\xe9\x8f\x1c\xc3\xa2\xf8\x05\x29\x02\xe5\xc2\x05\x85\xfb\x38\xf1\xe3\x40\x85\xbe\xa8\x88\xbd\x20\x5b\xd5\x17\x90\xc8\xd5\x20\x44\x9a\x25\x20\x48\x1d\xf6\x94\x96\x5c\xcb\xf4\xb0\x33\x9f\x07\xb8\x31\x80\xbe\xfc\x2e\x5d\xd8\x59\xfb\x43\xf6\xd6\x48\x9e\x55\x06\x0d\xee\x85\xd2\x4c\xcd\xfa\x24\x99\x93\xc8\x81\x18\x86\x52\x0c\x0a\xdb\x7c\x10\x05\xc9\x44\x48\xdd\xcf\x1e\xbc\xfb\xd2
\x8e\xff\xdb\x5c\x39\xe2\xd3\xa9\x90\x83\xa7\x57\xe1\x78\x60\x3a\x7c\xd5\x02\x2b\x98\x70\x65\x34\x10\xc5\xb2\x31\xe5\x4a\x48\xfd\x21\x1a\x08\x57\x89\xe9\x98\x07\xc2\x22\xd8\x56\x44\x96\x97\xdc\x05\x05\x41\xe1\xbe\x22\x6f\x7e\x5f\xa9\xcb\x9a\x96\xfc\x51\xe1\xc7\xb2\x5b\xf4\x2f\x36\xc1\x3a\xa5\x31\xbf\xcf\x64\x08\xef\x89\x16\x4b\x5c\x7f\x3e\xef\x40\xba\x97\x95\x14\x5b\x6c\xad\x62\x97\x0a\x85\x6c\xe0\x05\x30\xf0\x06\x0a\xe3\x1f\x3d\x0d\x43\x8f\x83\xef\x09\xd4\x10\x48\xba\xe2\x83\x54\xff\x1d\x04\xfe\x1a\x89\x3b\xbf\x44\xa2\xdd\x99\x51\x9b\xb4\xf4\xef\x9e\xc4\x25\xc5\xf7\xba\x2d\xa2\xb0\x72\x5a\x19\x20\xad\x6a\x65\xf6\xcd\xf2\x03\x13\x4f\xc1\x75\xa6\x59\x2c\xd6\x08\x0e\xad\xc8\xb9\x04\x75\xb1\x42\xef\xb2\x7a\x63\xca\xb4\x42\x6d\x30\x86\x2c\x0e\xd0\xab\xb0\x64\x61\xcf\x0b\x74\xa7\x2b\xb6\x41\x21\xcc\xf1\x80\x58\x85\xa9\x08\x87\x46\x5c\x5c\x31\xb2\x71\x03\xa0\x84\x0e\x92\x55\x08\xcb\x61\xd2\x8b\x5f\xd8\x20\xa8\x20\x04\xbe\x0a\x65\x35\x72\x7a\xf1\x4b\x5b\x08\x35\xb4\x10\xac\x42\x5c\x8f\xa5\x5e\xfc\xe2\x36\xc3\x12\x72\x88\x56\xa1\x5f\x0e\xae\x5e\xfc\xf2\x66\xc4\x8a\x2a\x20\x5c\x55\xc9\xaa\x60\xeb\xc5\xfa\xc8\x0d\xa3\x9f\xf0\x8e\x53\xd2\x81\x28\x04\x1b\x66\x8a\xc3\xbb\x65\x60\x88\x36\xc2\xee\x54\x60\xc3\x8d\xb0\xbb\x65\xd8\xde\xba\x79\x80\xa0\x7b\x06\x54\x41\xe4\xdd\x0f\xb1\x84\x5e\x54\xa6\xea\x50\x15\xb2\xd4\x31\x6b\xd1\x54\x3b\xc6\xa0\x72\xa6\x8e\x27\xd7\xcc\x51\xd3\x09\x68\xae\x6d\xf7\x67\x84\x2b\x30\x0a\x07\xd1\x4c\x82\x64\x63\x81\xfb\x8b\x11\x35\x02\x47\xb8\xdb\x46\x34\xf7\xef\x88\x2f\x40\x1e\xee\xf6\x63\xe5\x8d\x15\xc4\xc2\xa8\xbe\xc2\xe5\xd4\x9b\x91\xa1\x48\xfd\xc4\x0b\x4a\xbd\x34\x3e\x0d\x44\xb6\x9b\xa7\x20\x5e\xd7\x0f\x8d\x4b\x22\xed\x72\x6c\x44\xa2\x19\xb3\xb5\x3d\x16\xbc\xf3\x0c\x68\xf0\x8e\x02\x7f\xe2\x29\x97\x3f\x01\x7e\x63\xfe\xbd\xa9\x74\x05\xca\xc5\x92\x2e\x78\xbf\x28\x85\x95\xe5\x4e\x23\x0e\x82\x69\x77\x1b\x12\xa6\x5d\x89\x21\x00\x51\xcf\x0c\xde\x16\x63\xa2\x4f\x34\x53\xa8\xba\x12\xf3\x0f\x33\xab\x97\x19\x2c\xc6\x98\x68\x36\x9d\x60\xcc\xe3\xd8\xfc\x48\xfa\x37
\x8a\x68\x7b\x5a\x01\x55\x48\x4e\x3d\xfb\xf5\x03\x9f\x88\x1c\x42\x59\x08\x85\x10\x8b\xe5\x30\xb8\x1b\x55\xd1\xbb\x99\x3c\x57\x17\x3d\xf3\x87\x89\xbe\x68\x39\x0d\xa7\xa5\xbd\xd2\x79\xb5\x6d\x55\x75\x7f\x6d\x67\x16\x78\xbe\x45\x60\x20\xdc\x6b\x8c\xf0\xbc\x66\xd2\x9d\x60\xfc\x31\xcd\xbc\x07\x39\xd8\x27\xe9\x06\x4a\x70\x2d\x4e\xc5\x2d\x2e\xe1\x36\xda\x2e\x1c\x92\x3d\x04\x2b\x79\x77\xa5\x7b\x8d\x26\xe4\xf7\x9e\xf9\x24\xdc\xed\x1e\x5d\xda\x03\x48\xfa\x09\x3b\x4f\x40\xb8\xdf\x2f\xbc\x6c\x27\xda\x28\xc8\x46\x69\xb8\xee\xd9\xb8\x8f\xfb\xef\x5e\x02\x53\x4f\x65\x0e\x1e\x12\xb0\x6d\x45\x04\x18\x1b\x59\x8c\x27\x97\xe2\x46\x48\x7d\x69\x54\x8c\x4b\x25\x86\x8c\x43\xb0\x08\x87\x64\xb7\x4c\xf5\x44\x11\x63\xc0\x5e\x11\xe9\x8e\x28\x28\x90\xee\x80\x42\xd0\xcb\x1d\xf8\xfd\xbc\x59\xcf\xc7\xc2\xa8\x3b\x1f\x4e\x88\x74\x87\x80\x1b\x59\xf5\x6f\xb8\xbd\xd5\xfb\x21\x9b\x4d\x87\x9b\xf9\xe2\x06\xcd\x66\xe0\xf2\xc1\xe0\xb9\x21\xe4\x5d\x18\x6b\x21\x85\x22\x4e\x30\x0e\x83\x6b\x07\x7e\x48\x12\x50\x0a\x86\x84\xb4\xe6\xdc\xb9\x18\xa1\x51\xbd\x62\xdb\xe0\xad\x24\x01\x6c\x2b\xd2\x35\x8d\xe8\x47\xe7\xe1\x85\x67\xfe\xe0\x46\x40\xae\x68\x06\x25\x9f\xb6\x5a\x72\xae\x1b\xf3\x4b\x97\x43\x56\x7a\x46\x24\x99\x81\xe8\xaf\xf4\x25\x30\xe9\xc6\xfa\x6e\x2c\x56\x46\xa4\x2e\x88\x84\x84\x7a\xe9\xe4\xaf\x62\x28\xdb\x7e\xd2\x0c\xc8\x8b\x18\xb9\x08\x9f\xcc\x34\x28\xcc\x41\x5d\x84\xf2\x88\x0b\x08\x98\x31\xf5\x0c\xeb\x70\x74\x03\x05\xf6\xaf\xfb\xc3\xdd\x66\x8c\x71\x34\xec\xdc\x1f\x8c\xf7\x82\x48\xea\x50\x26\x62\x21\x5d\x25\x26\xd1\x8d\xa8\x76\xb4\x30\x2b\x50\x50\x38\x34\x42\x30\x53\xb9\x74\xee\x27\xb3\x29\x86\xee\x0f\xd0\xec\x06\x45\x07\xc8\x6c\x0b\x44\xd3\x52\xaf\x41\x62\xc4\x98\xa2\xa0\x98\x76\x39\x70\x96\xf4\x93\xc3\xdd\xbe\x72\xb9\x67\x84\x88\xa7\x40\xb3\xae\x99\xa2\xca\xf5\xbd\x5d\xc6\x92\x66\x13\x65\x4a\xc0\x88\x6e\x36\x4d\x17\x46\xd3\x4f\x2a\x9a\xf2\x11\xb7\xcb\x0d\x90\x9d\x25\xf0\x1b\x6a\x40\xa7\x0a\x19\xf7\x99\x18\xf2\x64\xac\x09\x85\x90\xf6\x04\x0b\xdc\xef\x3d\x1b\xdc\xbb\x1c\xb5\x2e\x28\x67\x82\x70\xda\x43\x1f\x58
\xc1\x44\xb9\x35\x12\xb5\xdb\x3d\x03\x73\x1e\x5d\x18\x30\x63\x47\x4c\x17\x01\xe1\xe8\x25\xc9\xd6\x6e\xf7\x07\x93\x30\x5c\x10\x05\x9c\x82\x5c\xe6\x5b\x01\x01\xf8\xaa\xd9\xbc\x9f\xf2\x38\x0e\x6f\x84\x37\x36\x75\x1e\xee\x18\xed\xc1\x08\xb6\xc0\xba\xe0\xd6\x8f\x85\x05\xcb\x54\x3d\x64\x11\xe4\x9d\xdd\x55\xdc\x97\xab\xba\x96\xe3\x4a\xb1\x3e\x3d\xd1\x97\x6e\x2c\xf4\x91\xd6\x2a\xf4\x13\x2d\x88\x3d\x62\x96\xd6\x5b\x7a\x4d\x17\x39\x7f\xee\xfd\xbd\x3a\x20\x61\xc2\x1d\xa2\xb4\x89\x96\xea\xfb\x70\x42\x12\x58\x5d\xa7\xfd\x54\xd4\x7b\xc3\xc7\x89\xc8\x45\xfd\x95\x08\xae\xc5\x20\xfd\x89\xce\x36\xc6\x12\x33\x27\xd0\x0d\x47\x17\x0b\xad\xee\xee\x67\xa1\x1c\x44\xb3\x15\x62\x43\x3b\x76\x57\xe0\x23\x8a\x4a\xd7\x9a\x66\xf9\xa6\xe6\xfd\x02\x9c\x74\x60\x1c\xb8\x1f\x09\xed\x95\x54\x1b\x5f\xb1\xad\x8e\x51\x4d\x8a\x50\x8a\xd2\xce\x55\x65\x09\x38\xcf\xa3\xcc\x47\xa9\xec\x80\x0e\x2d\x9f\x70\x9e\xaa\xb2\x09\x77\xbf\xed\x29\x50\x9e\x86\xd8\x13\xa0\x53\x3d\x1e\x92\x4c\xa1\xcf\x1d\x25\x45\x30\x51\x69\xeb\x45\x55\xce\x7b\x60\xec\x65\x26\x98\xa4\x51\x13\x8c\x36\x61\x26\x63\xb2\xc5\x98\x15\x05\xdd\x2d\xec\xb1\x1d\x7c\x51\xf6\x76\x4c\xcd\xda\xd9\x01\x81\x1b\xa2\x6c\xb5\x1f\xc7\xc8\xd4\x5f\x3c\xcc\x64\x96\xd7\xfc\x48\x5a\x6a\xc9\x49\xd7\x18\x4a\xd2\x1d\x80\xf0\x04\x0c\x3d\xb3\x0e\xf8\x9e\x74\xfd\xc5\xc2\x08\x06\xce\xba\x8b\xd4\xf7\xc4\x53\xcf\xd3\x7e\x65\x37\x78\x0c\x91\xa9\x1c\x42\x16\xe4\xfb\x8a\x2c\x64\x8c\xe5\x12\x7e\xd8\x6c\x86\x66\xa6\x0e\x59\x70\x1e\x1a\xe6\x30\xb2\xdd\x74\xc0\xb0\xdc\x56\xa2\x70\x21\xbe\xa6\x3d\xf3\xa0\xcc\x8a\x6c\x15\xa3\xda\xd8\xb9\xd7\xa0\xdc\x6b\xf0\xcd\xf8\x61\xb9\xce\xa1\x9f\x47\x97\x61\x7f\x75\x41\x80\x4f\xf3\xd0\x91\x8c\xd8\xd8\xac\xcd\x30\x66\xca\xfd\x0e\x03\xb6\xd5\x85\x1b\x53\x1d\x2e\xd6\x37\x66\xb1\x1e\xb0\xad\x0e\x2c\xad\xd8\x71\x3f\x66\xe7\x31\xdc\x98\x15\x3b\x4e\xc3\xb7\xcd\x8a\x7d\xc3\x6e\xdc\xeb\x7c\x65\xdb\x66\x2a\x45\xb5\xbd\x1e\xd5\xb8\x3f\x66\xe7\x63\xd8\x36\xa8\xc6\x16\xd5\xb6\x41\xb5\xcd\xb6\xdd\xeb\xac\x89\x83\x66\x33\x4e\x9b\xb3\xc5\xd8\x38\x7d\xec\xd7\xb9
\xc1\x23\x64\xb0\x6e\xda\xb3\x4e\x4f\x1f\x16\x67\x0c\xec\x4e\x9e\x3c\xd7\x17\x86\x13\xcf\xf5\xc5\x8a\x6d\x3c\x12\xc3\x98\x7a\x31\x63\x6c\x4c\xe7\x73\xac\x67\x07\x04\x8c\x6d\x17\x9b\x7e\xbf\x81\x6d\xc3\xd2\xad\xee\xd2\xde\x37\x0e\x82\x74\x39\xee\x2b\xf2\x74\x0c\x76\xd1\x8d\xcd\x97\xb6\xd1\x11\xdd\x2c\x9f\x21\x70\x5b\x0f\x33\x5a\x82\xb8\xce\x20\x76\x3d\xdc\x3f\xbe\xc2\x7a\xae\xd6\x4e\x13\x9f\xdd\x19\x2e\x19\x80\x32\xaa\x87\x9f\xd2\xb3\x87\x3c\xd1\xf3\x99\x72\xc3\x62\xc3\xb5\xdc\x82\x0c\x72\xdf\x72\x4f\xd9\x81\x5c\xa2\x26\x49\x97\xf4\x5e\xbe\xa1\x6c\xd6\xb6\x74\x37\xb7\x4f\x08\x2f\xd7\x4e\x4b\x95\x73\x63\x8c\x66\x72\x81\x7a\x25\x8a\x4b\x67\x8e\x55\x4d\xc1\xc8\x2c\xb5\x22\xf4\x43\x52\xa3\xd6\x6c\xe1\xd2\x6a\xd4\x93\xec\x69\x37\x7f\xda\xc3\xa7\xbe\x0d\x14\xe9\x93\x88\xc9\xf3\xe4\x82\x32\xc6\x88\x0d\x73\xa6\xcd\x66\x2a\xbf\xd3\x12\x99\xfc\xb6\x32\x28\xd5\x79\x74\xb3\x49\x48\xc0\x22\x6a\x94\x13\x12\x31\x4e\xdd\x6d\xdc\x86\x0e\x5c\x0e\x51\x7a\xdc\x8a\x08\x26\xec\x7e\x8b\xd5\xeb\x2b\xbf\x75\x3f\x55\xc0\x74\xdf\x71\x32\x55\x4a\x9b\x0a\x76\xed\x5b\x2b\x4b\xd1\x56\x33\x62\x69\x08\x51\x26\x5e\xbd\xe5\x93\x3e\xe7\xc9\x85\x41\x63\x56\x0a\x2f\xed\xe4\xec\x68\x9a\xa9\x11\x12\xd3\xd9\x75\x82\xb0\xdb\xc2\x34\x62\x06\x7b\xaf\x42\x74\x68\x04\x62\x58\x3e\xdb\x59\x1c\xe2\x2e\xc9\xf0\x4c\x72\x0b\x94\xdc\x02\x24\x4b\x32\x41\xa7\x18\xcf\xa6\x99\x3a\x94\x7d\x1c\xd4\x03\x10\x70\x7f\xe3\x29\x08\x3d\x3c\xdc\xe0\xc9\x43\x95\xf2\xc1\x43\xfb\x49\x82\xf0\xf8\xa2\x50\x8b\x03\x26\x0f\x55\x1f\x2d\x57\xd6\xe9\x45\x87\x41\x2f\xca\x82\x41\x42\x96\x9c\x47\x17\xbd\x91\x22\x21\xf0\xf3\xe8\x02\x34\xb4\x5a\x36\x76\x35\x44\x67\x54\x89\x4b\xaf\xd5\xea\x03\x39\xc0\xd9\xfd\x22\xf3\x45\x5b\x05\xdc\x34\x63\x98\x0b\x68\xf0\x59\x98\x3d\xc6\xac\x03\x63\xd6\x81\x01\x13\xbd\xf8\x70\xd8\x6c\x8e\x0f\xfd\x74\x83\xf5\x06\xb6\x19\xb9\x61\xd1\x79\x7c\x41\x5d\x0e\x13\x46\x9e\xb3\xf0\x7c\x8c\x3f\xae\xd8\x8d\xeb\xc3\x94\x3d\x77\x7d\x23\xd8\xb7\xb7\x18\x9b\xd8\x52\x23\x98\xc1\x1d\xdc\xc2\x35\x1c\xc1\x89\x29\xdc\xea\x5e\xc0\xa9\x29
\xd8\xea\xe2\x22\x70\xd2\x6c\x92\x19\x3b\x71\x7d\xb8\x63\x13\xc3\xa6\x23\x76\x62\xf8\x0b\x4e\x9b\x4d\x72\xcd\x4e\x5d\x1f\x8e\x98\xd1\x90\xc9\x2d\x3b\xc5\x0f\x47\xcd\xe6\x1d\x1d\x29\x72\x05\xd7\x90\x40\xab\x35\xa0\x70\xa2\x30\xd9\xc4\x36\x4c\x61\x6c\x54\xb2\x41\x8b\x5d\x59\x4f\xe1\x69\xf6\x65\x66\x21\x07\x2d\x36\xb3\x5f\xe2\x16\xdb\x81\x71\x8b\xed\x58\xfd\x32\x1c\x92\x23\x3a\x68\xb5\x32\x5c\x93\x0c\x57\x5e\xd3\xa0\x8c\x37\x6e\xb1\x6e\xb5\xf4\x1d\xcd\xeb\xba\xca\xeb\x4a\xa1\x47\x8a\xcc\x60\x9a\x51\xbb\x4c\x43\xb7\x97\x6d\x2e\x6f\x9d\xcc\xe7\xa3\x2d\xc6\x6e\xa9\xaf\x04\xbf\xee\xd5\x71\xd6\xa9\xab\xd5\x71\xbd\xbe\x8e\x9d\x85\xd5\x64\xb1\x3d\x65\x5a\xf2\x16\xb5\x60\xdc\x6a\x2d\x70\x5b\x20\x3e\x1c\xf6\xb2\xf6\x94\x06\xdd\x8e\xf3\x72\x41\x7b\xbc\xb2\xe0\x95\xe7\xf0\x8d\x7d\x9b\xcf\xcf\x2f\x7a\x29\xbd\x25\x5e\x79\xee\xfa\x90\x2a\x54\xdf\x28\xd6\x48\x3a\x87\xd9\x94\x9a\xcf\x3b\x87\x41\xfe\xfc\x2d\x93\xa0\x8f\xcc\xcc\x99\x79\x09\xdc\x7a\x01\xdc\x79\xdf\xd2\x0d\x9f\x23\xc5\x9c\x4b\x31\x9e\xfc\x7e\xf0\xe4\x5d\x29\xa9\xcd\x89\x5a\xb5\x35\x8d\xe7\x0f\x4d\x0f\x07\xd9\xda\x91\x1e\x0a\xbb\x57\x5e\x02\x47\x5e\xc0\xee\x03\xaf\x03\x3f\x3d\x01\xe6\x45\x9c\x7b\x6f\x53\x3d\xc3\x94\x67\x01\x9a\x51\xc6\x4e\x0d\xdc\x80\xde\xd7\x30\x2c\x28\x04\x6e\xc0\x76\xd2\x1d\xee\x8a\xe2\x12\xb8\x3f\x41\x40\x04\x81\xab\x0c\x94\x62\x89\x45\x1b\xb8\xb1\x1b\xb3\xfb\x99\x17\x59\x0c\x8b\x8c\xfa\xd6\x91\xca\x5c\x99\x45\x70\xca\xf2\x4a\x94\xb7\x8b\xe7\xc1\x05\xdc\x92\x96\x11\x12\x54\x09\x11\xc0\xdd\x9f\x10\x40\x92\x2e\xee\x53\x45\x14\x3c\x86\xc4\x74\x70\x00\x47\x46\x34\x2d\x4e\xab\x34\xd8\x30\x84\x7b\xc5\x72\xd8\xcc\x83\x8e\x9d\x72\x1f\x78\xdd\x52\xbf\xa9\xf2\xda\xf9\xbc\x24\x95\xb6\x4a\xdb\xe6\x35\x8f\xa9\x15\x76\x46\x49\xed\xd9\x70\xbe\xd0\x55\x96\xa9\xcc\xda\xca\x53\xb6\xf2\x21\x66\xa1\x55\x9e\xcd\x10\xc4\xfd\xe7\x86\x1e\xed\x5e\x43\xe8\xc6\x10\x51\xef\x11\xbe\x25\xa1\xab\x99\x82\xd0\x4d\x58\x04\x9d\x43\x62\xe4\x5b\xec\xce\x68\xae\x26\xda\xea\x7d\xe8\xa4\xd5\x53\xef\xf1\x72\x41\x62\xea\x8a\xcd\xb2\x12\xbb\x47
\x6e\xcc\x94\x45\xb5\x19\x11\xf5\xca\x38\x28\x6c\x11\xd3\xaa\x56\x0b\x37\x4b\x09\x36\x8b\xfe\x33\x67\xc0\x04\xb9\x78\xcc\xb4\x6d\xd3\x1e\xea\x5f\xb9\xf0\x1e\x30\x9d\x2a\x95\x03\xab\x54\x0e\x0a\x35\xd1\x54\x3c\xb0\x1d\xd8\xea\x42\x00\x6a\x85\x6b\xc8\xce\x91\x1b\xa6\x5d\xd1\xcb\x94\xd4\xe0\x2a\x1c\x0f\x3e\x44\x03\x11\xe7\xcb\xcf\x84\x75\x7a\x93\xc3\x9b\x6c\x21\x9b\x64\x6b\xcf\x95\xb1\xfc\xd9\xb8\x7f\x73\x3e\xb9\xf0\xcc\x1f\x94\xf0\xad\x16\x6f\x11\x3b\xf1\x71\x2a\xf0\x43\x36\x6c\x36\x87\x87\x6c\xda\x6c\x92\x84\x49\xb2\x7d\x3e\xb9\x80\xab\x74\x6c\xa7\x90\xf7\x41\xad\x07\xf2\x2e\xe8\x71\x36\x5d\xe4\xfd\x91\xd9\x66\xd0\x01\xe5\xfa\x50\x4e\xac\xf2\x4d\x2d\x6d\x5a\xa0\xab\x2e\x53\x91\xa5\x47\x4a\xcc\x06\xdf\xcd\xb3\x2e\x07\x26\x7e\x57\xab\x14\x65\x55\x56\x94\xeb\xb6\xb2\x06\x51\x8b\x1c\x59\xda\x87\xcf\xf5\xe0\x95\x4e\x22\x26\x4b\xdb\x7f\xa0\xd9\x36\x32\x2c\xed\xe9\x15\xe3\x35\x9f\x93\x55\xaf\xad\x97\xa9\x3e\xb6\x3d\xd1\x6c\xea\x2d\xc6\x64\xb3\x59\xdb\x56\xd4\x20\x4b\x47\x98\x71\xb7\x3c\x06\xe5\x26\xb5\x58\xfa\xd4\x89\xe6\x26\xe6\x3b\x85\xfa\x66\xbb\xcc\x90\x3e\xe3\x9a\x93\x0e\xc8\x5c\xe7\x29\x41\xe7\x6a\xbd\xed\x5a\x37\xae\x2b\xf3\xab\x48\xef\xaf\x7a\xe9\x7e\x67\xca\x8d\xbd\x55\x9f\xd8\xfd\x77\xcf\x34\x61\xea\x29\x37\x59\x64\x55\x1f\x78\xe5\xf3\x53\x71\x9a\x86\x45\xbb\x21\x86\x2e\x66\x1e\x0c\xdb\x23\xb2\xc4\xf6\xe7\xda\xbd\x29\x54\x3e\x99\x85\x83\x16\x0a\x13\x41\x7c\x14\xd5\xbd\x4a\x41\xc1\x4c\xd1\x9e\xc8\xd7\xaa\xb4\xa6\x50\xc6\x42\xe9\x27\x62\x18\x29\x41\xb6\x15\x49\x30\x5e\xd2\x4d\x28\xf0\x7a\x3d\x8f\x8d\x09\xb3\x95\xd6\x40\x0b\x27\x42\x79\x83\xb8\x44\xb6\xe9\x67\x2b\xc0\xb5\x7b\x54\x36\x56\x1a\x9d\x2d\xb3\x00\x29\xf4\xab\xad\x2d\x1c\xb8\x31\xb3\xb3\xc0\x9d\xe5\x43\xf6\x68\x15\xbb\x66\x5e\x12\xdb\x91\xd5\x2f\xe1\xb0\xec\x6e\xd0\x2c\x77\x0f\x3f\x4b\xb7\xca\x5f\x28\x3e\x42\x3f\x71\x9e\x0b\xa7\xdc\x3f\x99\xfa\x7c\x2e\x2e\xdc\xa3\xde\x5b\x69\x2c\x4b\xc6\x58\xe2\x06\xfd\xc4\x8d\x3d\xd3\x5f\xee\x4f\xec\xae\x52\x24\xd3\x82\x68\xf7\xce\x1e\xda\xcf\x1b\x50\x64\xae\x60\xda\xbd\x4d
\x03\x13\x92\x72\x60\x42\xba\xce\x27\xe7\xdc\x68\xba\x81\x7b\x04\x11\xdb\x41\x47\x44\xd0\x8f\x6c\x5d\x51\x5a\x57\xaf\x36\x6c\x11\x54\x86\x3a\x70\xd5\x05\x5d\x88\x66\x13\x77\xfe\x45\x29\x30\xc6\xe6\x55\xa8\x1e\xef\x50\x6e\x4c\x24\xed\x0d\xac\x67\xd3\x9b\x92\x6e\x87\x2e\x16\x24\xc1\x74\x21\x0c\xa7\x28\x91\x4c\xe4\xed\x2b\x85\x6a\x3e\x55\x69\x04\xa4\x4d\x2e\x65\x58\xfe\xf4\x6e\x2a\x32\xd6\xf8\x5d\x12\xe9\x6a\x71\xab\x9f\x46\x52\x0b\x69\x8f\xff\x75\xb7\xd6\x80\x3a\x4e\xd1\x49\x59\xa2\x01\x9e\xb9\xf0\x62\xa8\x1f\xef\x2c\x9d\xee\x54\xec\x35\x99\x91\x50\x41\xe2\x4a\x3e\x11\x90\xb8\x68\x21\xe2\x8e\x48\x71\xe2\x5e\xba\x9a\x8f\x3e\xf0\x89\x70\x75\xf4\x2e\x9a\x09\xf5\x94\xc7\x82\x50\x08\xd8\x19\x5a\x16\x45\x07\x82\x28\xbc\x3f\x58\x57\xc0\x5e\x93\xa7\x8a\x44\xe7\xe2\x82\x42\x90\xf7\xe7\x1d\xf9\x03\x8f\xa9\x42\x50\x89\xa5\x50\x20\x41\x97\x36\x69\x31\xd4\x10\xf3\x78\x3c\x33\x7f\x1e\x9b\x3f\xa5\x60\x45\x3c\xcd\x9e\x45\xd8\x0f\x7c\x48\x58\x80\xdd\x03\x9c\x3d\x55\xa5\x48\x95\x77\x95\x78\x8b\xdc\x4d\x2e\x70\xa2\xb1\x2b\xa3\xa8\x2a\x33\x62\xdf\x14\x31\x6b\x97\x59\x3d\x8c\xc9\xb7\x28\x36\xe2\x2f\x2b\x5b\xe8\x96\x4c\x51\x25\x53\xac\x25\x53\x80\x2c\x85\x33\xdc\xa2\x6b\xfc\xd6\x1e\x01\x40\xb2\x03\x33\xb5\x74\xa8\xc7\x02\x22\xf3\xe8\x47\x83\x3b\x08\x4d\x13\xa2\xf5\x4d\xf8\x21\x59\x62\xc3\x18\x18\xc7\x33\xff\xec\x0f\x49\x1c\x53\xd4\xa1\xe4\x8c\x12\xe5\x06\x3c\x6d\x5d\x68\xd6\x9d\xc8\xb4\x2e\x82\x10\x0c\x5b\x43\xc8\x34\xfc\x90\xac\x03\x01\x3a\x55\x82\x83\x66\x93\x64\x44\x30\x3c\x0e\x7d\x40\x6d\xf3\xe1\x8d\x5a\x19\x38\xa3\xc4\x8f\x44\xc4\xfa\x48\x86\x13\xdc\x01\x78\xa1\xf8\x44\xf4\x57\xbe\xad\xc4\xfd\x94\xe2\x9d\x24\x74\xc5\xee\x83\x83\x0e\x2d\x45\xdc\xbc\x53\xc4\x7a\x62\x89\x4e\x4f\xc0\x94\xa3\xa3\x39\xa1\xf7\x09\x6a\x27\x49\xbf\xe3\x91\x37\x8a\x70\x0a\xb8\xd3\xda\xcd\x27\x59\xed\x24\x1e\x93\xa0\xfa\x04\x61\x50\xfc\xa0\xb2\xd2\x35\xea\x5a\xc7\xfe\xb4\x48\x12\xb6\x53\x8e\xa2\xff\xa0\x8a\x0d\xf4\xe7\xc2\x74\xcd\x38\x0a\xb0\x45\xee\x95\x59\x84\x5d\x3e\x9f\x4f\x49\x97\x2e\xd6\xc6\x33\x46\x11\x7c
\x13\x95\x90\x2f\x7a\xaf\x9a\xcd\xab\x30\xd6\x91\xba\x73\x47\x11\x51\x14\x24\xb1\x69\x1a\xb0\xa5\xaf\xd6\xee\x02\xaf\xc6\x96\xa1\x32\x86\xc8\x89\xe6\x5a\xa0\xcf\xdc\x81\x12\x5e\x38\x53\x6b\x53\x28\x6c\x46\x9a\xea\x00\xeb\xf0\xde\xd7\xdd\xf9\x65\xef\xfc\x02\x56\xec\x98\x78\xd5\x80\x62\x78\xbd\x9a\xad\xec\x6e\x41\xdf\xfe\xe3\x9d\xa9\xf2\xbe\x7f\x35\xd0\xe4\x52\x92\x5a\x40\x5d\x39\xb7\x29\xbd\x7f\x2a\x49\x82\x01\x60\x45\x82\xd3\xe5\x1d\x20\x55\xdd\x01\xc2\xd3\xd2\x25\x42\xf5\x9a\xbd\x1f\x1b\x72\xb7\x2a\x86\x22\x3d\xc3\x53\x3e\xc5\xa4\x68\xff\x5a\xe3\x2e\xbe\x77\xa4\xcb\x7b\xf1\x1f\x53\x5e\x5f\x11\x3d\xab\xe8\xfd\x1b\x55\x1e\x17\x1b\xe8\x9e\x05\xa9\xb9\x23\xa1\xd3\x4d\xda\x27\x77\xaf\x07\x66\xae\x28\x22\xfb\x47\x92\x18\x99\x46\xbd\x13\x49\x06\xb8\xe1\x67\x27\x31\x46\xed\xaa\x6a\xd4\x6e\x11\x28\xf7\xb1\x2e\x5a\xb2\xd5\xea\x5c\x5d\x10\x0a\x4f\x37\xc5\xed\x6a\xb6\x1c\x70\xf2\x5a\xb9\x71\xa0\xa2\xf1\x18\x21\xe1\xe9\xa2\x1e\xf8\x58\x6d\x19\x86\x3a\x6a\x42\x4b\x67\x06\xf4\x86\x78\x8f\xf5\xe4\xa6\xb5\xbe\x13\x43\x63\x86\x65\x3f\x4f\xa3\x29\xd3\x69\x23\x0c\xee\xaf\x8a\xfd\x55\x82\x99\x5c\xe3\x0d\x98\x6a\xf1\xc3\xc2\xeb\x17\xb1\x4e\x2f\x68\x36\xa3\x43\x6e\x17\xd1\xd0\x68\x33\xc5\x21\x7d\x63\xde\x33\x79\x1e\xb5\x5a\xb8\x11\x76\xae\x5a\xad\x8b\x66\x93\x74\x3b\x8c\x85\x7d\xa2\x5b\x2d\x10\xac\x4b\x3d\x22\x5a\x2d\xc0\x34\x0e\x8c\x91\x83\xdd\xbd\x47\x8f\x9a\x21\xed\xd7\xca\x79\xdd\x62\xff\xfb\x0d\x09\xfa\xca\x6b\x77\xd3\x28\x2c\xf8\xb9\x21\x2a\x4c\x1d\xe6\x66\x51\xb5\x0a\x5d\xa5\x94\xf6\x25\xd1\x6e\x9c\xf8\xb1\x36\x86\xc9\x0e\xa5\x7d\xd5\xda\xf1\xda\x5d\x4f\x12\x7d\xae\x2e\x68\xdf\xf9\x53\xa2\xbb\xf6\x5c\x5d\xf4\xdb\x3b\x9e\x6a\x75\xcd\xd7\x76\x77\x41\xe1\xd9\xa6\xb0\xb4\x6a\x3d\x46\xbb\x59\x50\x78\xa9\x56\x66\x41\xe8\xc9\xc2\x0a\x93\x99\x22\xa7\xab\xa9\x0f\xec\xfe\xb5\x3e\xdc\x7b\x34\x9f\xef\x3f\x2c\x32\xa8\xc9\x42\xab\xa2\xf0\x42\x6d\x4c\x6f\xd1\xe9\x15\xfd\xd2\x53\x85\x72\x5a\x23\xb6\xbd\xf7\x08\xb7\xe7\x0e\x3b\xf3\xb9\x3c\x64\x49\xea\x89\x13\x4c\xfe\x26\x5a\xc9\x22\x8f\xc9\x51\x76
\x1c\xde\xab\x0d\xa9\x1d\x3a\x2b\xdb\x26\x56\xb5\x6d\xef\xd1\x3f\xc5\x7c\x2e\xfe\xb9\xff\x90\x86\x43\x72\xb0\x6f\x7f\x3d\xec\xa0\x7e\x28\x0e\x1f\x3f\x9c\xcf\xbb\x9d\x9d\x43\x91\x92\xa3\x59\xf7\xe0\x37\xdd\x12\xed\x47\x0f\xad\x5f\x2f\x7f\xb1\xbf\xdf\xab\xbe\xd8\x7b\x54\x10\x2d\x31\x1c\xb0\xf7\x57\xcc\x9f\x94\xf2\x26\x20\x43\xf3\xc3\x4e\x3f\x9b\x01\x1e\x6f\xc9\xc2\xef\x1d\xa4\xce\x99\xa8\x36\x0d\x5a\x2d\xda\x33\x4c\x1f\xf5\x89\x60\x5d\xd0\x36\x9d\xcb\x12\xd3\x47\xb4\xd9\x34\xb0\x8b\x9c\xcd\x79\xca\xe1\x36\x7d\x4e\xa5\x77\xcb\x11\x81\x35\x41\x69\xe3\x3a\x24\x93\x62\xd6\x38\x7b\xff\xee\x95\xd6\xd3\x63\xab\x86\x98\x91\x83\xd3\x21\xd1\x8c\x53\x63\x2d\x2f\xef\x41\x4f\x55\x34\x52\x22\x8e\x9d\x8a\x44\xc9\xda\xf8\x34\x9a\x4c\x13\xcd\xfd\xb1\x68\x36\x9f\x9a\xf9\xc2\xc9\x7d\x10\x78\x46\x19\xe0\x03\x31\x80\x60\xe0\x49\x57\x47\x9a\x8f\xed\x6a\xb0\x22\xc8\xc0\x11\x4a\x45\xca\xa9\xc4\xe5\x91\x13\x49\x8e\x86\x6b\x4b\x68\xab\x1e\x2d\x97\x39\x59\x5f\xc6\x10\x54\x2b\xb0\xca\xcc\x5b\x71\x74\x62\xc0\x71\xd7\x3f\x9e\x46\x32\x16\x5f\x8e\xdf\x81\x7f\xe2\xdd\x07\x57\x9e\x74\x63\xcd\x75\x12\x43\xf0\x2e\x7f\x3e\x15\xb7\x7a\x01\xc1\xcf\x15\x47\x5c\xb6\x23\x9b\xa0\xa4\xc8\xc2\x56\x4c\x05\x99\xe6\x78\x71\xfe\x54\x7f\x4a\x87\xc2\xea\x74\x33\xc0\x21\xb0\x46\x89\x31\xe1\x32\x26\x74\xbc\x86\x43\x7b\x9d\xc3\x08\xf5\xb6\x20\x95\x58\xa1\x1c\x91\x0e\x44\x46\x83\x2e\xbf\xda\x69\x45\x14\x14\xbb\x23\x1f\x86\xe5\xe4\xdf\xc5\x1a\x71\xad\xc9\xa9\xb1\x6a\xfb\xbc\xe5\x00\x66\x55\xe0\x1e\xa7\x0b\x3c\xc6\x9a\x87\x9e\x11\x69\x96\xd7\xa3\xf1\xf8\x38\xed\x95\x57\x82\x0f\x84\x8a\x09\xa5\x10\x94\x7b\xcb\x1e\xb9\xc2\xbd\x49\xdb\x3f\x87\x3b\x9d\xce\x7c\xbe\xdb\xe9\x1c\xb2\xec\x15\xcd\xc5\xa2\x51\xcd\x59\x51\xd8\xf4\x25\x9c\x48\x72\x3b\x34\xeb\x74\x4f\x31\x45\x74\x4d\x6b\x38\xb2\xb1\x7f\x1e\x59\x5b\x78\x46\xee\x86\x36\xa1\x98\x59\x3e\x89\x84\xc4\x0d\x70\xf3\x75\x41\x7b\x5a\xdd\xdd\x4b\x37\x9a\x0a\x49\x12\x37\x78\x0f\x89\x3b\xe0\xb0\xd5\x59\xce\x3e\x81\xbc\x75\x3d\x24\x06\xc0\xa0\xd9\x5a\x9f\xad\x26\xf8\xd9\xd3\xae\xdf\xb3
\x59\xe3\x30\x88\x24\x9d\x69\xb6\x8b\xcc\x0c\xb1\xd9\xcd\x5c\xdf\x58\xda\x39\xb9\x77\x53\x14\x7c\xca\xf5\x41\xba\xb3\x50\x5f\x3d\x55\x62\x20\xa4\x0e\xf9\x38\x36\x36\xd0\xc0\xcc\x52\xe5\x06\xfb\xd4\x58\xcc\x6e\x3a\x03\x4c\x91\x7d\x97\xdb\x86\x65\xe9\x05\x12\x37\xe0\x45\x14\x46\x2c\xe4\x80\x3c\x1f\x12\x41\xfb\x64\x05\x3d\x4e\x6a\x3e\xb7\x0d\x05\x8e\x3d\xec\x2e\x5c\x9f\x7a\xf8\x54\x56\x42\x5c\xee\x47\x4a\x13\xba\xa8\x6b\x3b\xd5\x50\xd3\x0e\xf8\x9e\x76\x7d\xe0\x95\x29\x20\x99\x11\x0d\x85\xf3\x6d\x46\xbe\x0d\xf3\xfc\xf4\x48\xf7\x17\xb5\x26\xad\x8d\x33\x72\x7a\xd2\x0d\x3e\x36\x9b\x44\xb7\x98\x33\x71\xcc\xfc\x0e\x44\xfa\x33\x74\xec\x38\x16\xec\x7b\x2c\x46\xcf\x6f\xa7\x48\xd5\xf2\x48\x1e\x69\xa3\x59\x3f\x59\xaf\xf3\xcb\x64\x3c\x46\x83\x6f\x92\x96\xdc\x9c\x52\x16\xd2\x5d\xc4\x8e\x99\x99\x36\xd4\x24\x4b\xe1\x03\x11\x6b\x77\x7b\xbc\xd5\x3a\x94\xcd\x26\x86\xc3\x8a\x5b\x11\x90\x80\xd2\x66\x33\xda\x2a\x43\xf6\x0a\x84\x61\x1e\xc1\xd5\xee\xc2\x30\x0d\x57\x09\xcd\xf4\x0e\x33\x3f\x3f\x13\xe7\xe1\x45\x6f\x78\xde\x6e\x87\x17\xcc\x37\x8a\xb3\x8f\x6a\x73\x92\xa5\xf1\xfb\xee\x83\x38\xef\x5c\x80\xb0\x22\x02\x38\x1c\xe3\xa9\x7d\x1b\x8c\x92\x55\x9a\xcf\xe6\xe2\x15\xd3\x90\x26\x18\xd4\xe5\x65\xc5\x66\xcb\x4a\x3d\x48\x9d\x3c\xdf\x5d\x66\x00\xd5\x74\xcd\x70\x48\x82\x56\xeb\x9f\x2c\xc9\xb5\x90\x92\xe3\x85\xab\x11\x6a\xe7\x59\x18\x47\x7b\x17\xb2\x74\xd1\xca\x34\x52\xe5\x29\x9e\x72\xc8\x73\x75\xd1\xd3\xe7\xed\x36\x86\xb3\x5e\x6b\x22\xb0\xb1\x79\x9e\x7f\x6c\xae\x84\x02\x7c\xa9\x8a\x9d\x0b\x08\xe0\x38\x57\xf5\x29\xfc\xd8\xac\x09\xe9\x6c\x4c\xcb\x63\x59\x19\xd9\x9e\xcc\x65\x72\x90\x0d\xab\xa0\xe9\xfe\x99\xd5\x40\xf2\x00\x56\x7b\x78\x87\x43\x2a\xaf\x29\xad\x22\xce\xbd\xfb\x35\x78\x4a\x61\xe5\xc0\x7c\x5a\x6b\x01\xcb\xe6\xa6\x14\x75\x1b\x33\xe0\xfd\x8f\x0d\x25\xd5\xe1\xa1\x5c\xd0\xde\xd9\xda\x7a\xd5\x3f\xff\x29\x37\x64\x3a\xfe\x27\x7e\xee\x55\x12\x2c\x8a\x75\xc7\x21\xf3\xf3\x5e\xaf\xa5\x16\xea\x86\x8f\xcb\x56\xd0\x53\x49\xc4\xc6\xd3\x5e\x79\x21\x45\x6b\xe7\x71\xdf\x96\xa2\xdb\x1b\x33\xf2\x5d\x83\xf3\xa7
\x6c\x34\x1a\x0d\x07\x66\xe4\x29\xfe\x72\x40\x96\xf7\x43\x7e\x2f\x97\xb8\x23\x97\x7a\x6d\x03\x5b\x46\xdd\xef\x18\x49\x96\x17\xfe\xa3\x5c\xf8\xf1\xc3\x43\x46\x24\x3b\xc3\xa3\x56\xcd\xa6\x3c\x64\xdd\x9d\x9d\x02\x56\xea\x02\x36\x07\x3b\x64\x8f\x3b\xcd\xe6\xc1\xfe\x21\x2b\xf9\x43\xd5\x6a\xc8\xfd\x87\xcd\xe6\xde\xa3\x0a\xa4\x2e\x41\x5a\x62\xe6\x73\xac\x67\x3e\x47\x24\xa5\xbb\x30\x74\x25\xbb\x40\x71\x01\x45\xe5\xbd\xcb\x4b\x37\x3c\xac\x29\x11\x94\xde\x3b\x0e\xda\x34\xa8\xf8\x6a\xf6\x11\x42\xcd\x26\x30\xd4\x8c\x54\x3c\x15\xa5\xc3\xc6\xca\x0d\x30\xbf\xf0\x00\x92\x75\x6c\x76\x47\x42\x0d\xd2\xdd\xee\x6b\x2f\xc1\xd8\x69\x6e\x46\xb8\xfc\x31\x01\xfc\xc7\x22\xb6\xe6\xc7\x9a\x73\x7d\x36\xaa\xb6\xbd\x83\x3b\x64\xdb\xd5\xac\xb8\x46\xf3\x32\x64\x48\xe0\xec\x0e\xdd\xeb\x3e\x68\x37\x80\x3b\x32\x44\xcc\xa0\x5d\x81\x39\x91\x13\x50\x8c\x83\x66\x02\xcf\x7f\xf8\xab\x34\xc7\x06\x16\x5a\x6b\x07\xce\x48\xa4\xc1\x66\x18\x46\xc3\x03\xce\x0c\x17\x41\xbc\x12\x95\x64\x12\x7e\x01\x9d\x2c\x63\x1a\x6b\xd6\x85\x81\x66\x3b\x70\xa3\x59\x07\xb6\xf5\x5a\xd9\xb1\xa0\x30\x59\xa9\xfb\xe6\x39\x20\xe0\x4a\x6f\x3a\x0f\x9c\xa7\xf9\xa5\x30\xdd\x00\xd8\x29\x03\x8e\x36\x00\x76\xcb\x80\xb3\x35\xa4\xa5\x27\xfe\xe0\x6e\xcd\xf7\x9d\xf4\xfb\xad\x66\x2f\xe1\xfa\x2f\x90\x1c\x69\x66\xea\x5d\xc0\x89\x66\x4a\xc2\xa9\x66\x1f\xe0\xb9\x66\x33\x09\xdf\x56\x0f\x48\xcb\x71\x16\xf0\x5d\xaf\xf7\x48\xfe\x01\x12\xbe\xda\x3c\xb3\xf0\x74\x2d\xdc\x31\x99\x91\xdf\xc1\xe6\x89\xa1\x70\xa9\xd9\x46\xa6\xdd\xd2\xf5\x34\xce\x02\xf3\xe9\x5a\x86\xc5\x3c\xff\x59\x4a\xe7\x32\x83\xc2\x1b\xcd\x7e\xc2\xbb\xbf\x40\xde\xcd\x52\x67\xe7\x2a\x79\x5a\x83\x84\x84\xa9\x76\x17\x6b\x88\x34\x96\xeb\x49\x26\x40\xb1\x04\x34\xe3\xa6\x82\x0f\x7a\xfd\xcc\x7d\x67\x67\xce\x99\x69\xe0\xab\x0d\x70\x6f\x0c\xdc\x8c\x7c\xd0\xd0\x81\xdf\x15\x51\xb4\xdd\xa5\x36\xd7\xf7\x59\x75\x08\xb2\xe0\xde\x92\x49\xdd\x29\x65\xf4\xae\x24\x00\x55\x7d\xe5\x75\x3b\x3b\x7b\xbf\x11\xd5\xc6\x0f\xb4\x55\x29\xd8\xa5\x6d\x4c\x16\xd9\x3a\xd8\xdf\xdf\x3d\x58\xc0\xeb\x35\x33\xf9\x52\x43\xa4\xd3\x89\xf5
\xf1\xef\x90\x83\xae\xcd\x3a\x4d\x97\x44\x1a\x15\x4d\xa6\xeb\x7d\x97\x52\xcf\xbe\x6a\xc9\xf3\x6e\xf1\x7e\x87\x52\x54\x77\xe0\x78\x5d\xb7\x39\x7f\xca\x3f\x25\x71\x5a\xdf\x34\x91\xad\x2e\x6d\x11\x87\x36\x9c\xd6\x5b\x45\xbe\xea\x34\x91\x09\x7c\x5d\xd9\xa2\x19\xf9\x69\xba\xfb\x8c\x2e\xe0\xe7\x32\x72\xe9\xa5\x7c\xb1\x22\x03\x51\xf9\x08\x94\x44\xde\x5b\xf2\xc2\x7e\xcc\xf2\x71\x76\x6d\x32\x87\x22\xac\x36\x4f\xf7\x00\xb2\x74\x05\x08\xf9\x43\x11\xb3\x00\x70\xbb\x52\x29\xb3\x52\xce\xc8\x89\x06\xad\x4d\x87\x13\xa3\x35\x19\x5d\x1b\xd9\x2f\xe9\x3b\xae\xd3\xd2\x9e\x73\xfe\x0f\xcc\x56\xf6\x8f\x0b\xc7\x72\x3c\x37\x0c\x99\x1f\x11\x69\xe4\x1b\xf1\x59\xb4\x81\x0f\x01\x73\xce\x6d\x5f\xb9\x9c\xb6\x9c\x0b\xa7\x8c\x37\xd8\x84\x65\xc7\xcb\x4e\xe9\x70\xd3\xae\xc8\xf5\x71\xca\x44\xae\x9f\x9d\x0a\x0b\x19\x51\xae\xdf\x77\x4e\xaf\x44\xe3\x4d\x1c\x49\xf7\x99\x08\xa2\x81\x70\x23\x29\x3e\x0e\x1b\x5c\x37\xbe\xc7\x91\x74\x5a\x56\xfd\x70\xe0\x35\x36\xd3\x73\x96\x40\x1d\xda\x72\x1a\x43\x1e\x8e\x31\x27\x5b\x43\x5f\x89\xc6\x30\x1a\x8f\xa3\x99\xcd\x28\xf5\x4d\x93\xdf\x15\x89\xa8\x81\x9a\xf1\xbb\xd8\x73\x7a\x35\xc5\xc6\x28\x33\xd8\xa0\x10\x66\xe4\x95\x86\x63\x8d\xd1\x37\x0b\xc9\x38\x13\x2c\xc2\xdb\x57\x12\xa6\x4a\x4d\x4c\xb5\x50\xe7\x98\xcb\x46\x28\x75\xd4\xe0\x2b\x5a\x80\xa9\xed\x64\xd4\x98\x46\x71\x1c\xfa\xe1\x38\xd4\xa1\x88\x9d\x96\x6d\xf4\xfa\xf6\x6d\x39\xc5\xf6\x6f\x80\x03\x1f\x61\xb6\xf9\x74\xe0\x43\x86\xe5\x3f\xa9\xc8\x1f\x8b\x89\xad\xc4\x34\x19\x37\x59\xd7\x61\x6d\x39\x9e\x69\x26\x6a\x70\xde\x72\xd9\x51\x78\x23\xa4\xc5\x80\x70\x0e\x6d\x91\xb7\x8a\xcc\xc8\x73\x0d\x7b\x80\x5d\x97\xbe\x0e\x8c\xe8\x7a\xa6\x37\x1c\xfe\x2d\x2f\x5a\x10\x78\x1a\x06\x78\x50\x1a\x5e\xe2\x81\xec\x17\x9a\x7d\x86\xf7\x6b\x45\xda\x0f\xa2\xe8\x83\x1f\xd6\x88\xfc\xac\xd9\x0b\x4d\x66\xe4\xbd\x86\x1d\xd8\xdd\xa1\x14\xbe\x68\x76\x4b\x9e\x19\x61\xf7\x59\xc3\x4b\xf3\x1f\x85\x27\x9a\x85\xf0\x63\xfd\x42\x8d\x47\x9f\x21\x5c\xab\x8c\xdb\xba\x3e\x69\x76\x04\x6f\x35\xfb\x02\xbf\xaf\x5e\xbb\xb2\x6c\xb2\xf0\xc7\xda\xaa\xb2\x9b\x16\xfa\x98\xbd
\x1b\xa4\x60\x2f\x40\x09\xb6\x0d\x5a\xac\x76\x54\xf7\x32\x83\x7d\x46\x94\x80\xdd\x1d\x90\xd6\x8f\xe5\x83\xb6\xf3\x2b\xed\x4a\xed\xf2\x45\x96\x01\x4f\x14\xab\xd9\x6b\x14\x19\x76\x55\x41\x23\x5d\xfc\x55\x3d\x2f\x34\x51\x0f\x76\x77\x72\x39\x93\xfb\xbd\x0b\x02\x5c\xde\x93\x6c\x46\xb4\x40\x49\x97\x61\x4e\x96\x31\x63\xd2\x40\x91\x61\xde\xdd\xf9\x4d\xb9\x02\x04\x7b\x9b\x8e\xd8\xee\x0e\xe8\x76\x97\x52\x90\x4c\xf6\x0d\x0f\xba\x23\xea\x29\x77\x04\x06\xbb\x30\xd8\x4d\xe9\x5e\x7e\x7e\xfb\x99\x86\xdf\x0d\xd4\x90\xb6\x34\xcc\xc8\x1f\x18\x3a\xff\xdb\x67\x4d\x11\xb2\xc8\xaa\x57\x86\x4c\xb9\x00\x3f\x53\xe0\x82\x3d\x87\x40\x6c\xd8\x89\x49\x17\x6d\x75\xd8\x29\xda\x9d\x08\xd8\xea\xc2\xfd\xc8\x13\x20\x3c\xfd\x60\x77\x67\xde\x81\xa1\x97\xa4\x2e\x16\xce\x52\xcd\xea\x8e\x3c\xc1\x36\xa1\x03\xa6\x67\x54\x4b\xc5\x54\xdb\x34\x92\x69\x10\x76\xb4\x38\x08\x0a\x09\x4b\xf0\xc4\xf2\xca\x0e\xeb\x1c\xca\xfc\x14\xe9\xbf\xef\xee\x80\x60\x88\x57\x83\x6c\xeb\xd2\xfe\xde\x0d\x09\x84\xa9\xa9\xad\x4d\x0d\x12\xce\xa0\x08\xf7\xf8\xa2\x0d\x37\x8b\x15\x3c\xba\x25\xdd\xed\x05\x0c\x05\xf3\x25\xf8\x82\xc5\x12\x62\xb1\x59\x81\x1b\x57\xbf\xaf\x58\xb8\x32\xce\xae\x05\x68\x75\x6b\x81\x59\x3b\xb9\xbc\xca\x6e\x37\x5b\x2c\x60\x20\x18\xd7\x70\x23\xd8\xe6\xb3\xf8\xf7\xfc\xc0\xe3\xe0\x07\x9e\x02\x9e\x78\x02\xfc\xc4\xd3\xe0\xdf\x79\x12\x82\x53\x0f\xfb\x72\x5b\x30\x2d\x61\x22\x36\xcf\xcb\x2b\xc1\xa4\x84\xa9\x58\x2b\x08\x0e\xbb\x7d\xe5\xdd\x91\x2b\xc3\x7b\x13\x7b\x4c\xd1\xb0\xcd\x48\x30\x2e\x61\xb6\xb1\x9c\xe3\xd8\x82\x1d\x48\x6f\x52\xb9\x13\x6b\x8e\x4f\x75\x30\x4e\xa6\xa2\xd6\x80\x60\x7b\xbb\x78\x58\x61\x6f\x9f\x31\xdd\xef\x7a\x1d\x48\x98\xe8\x25\x45\x68\x53\xab\x55\xc4\xdf\xd6\x12\xb4\x63\x50\x64\xba\x4d\x95\x9f\xe6\x3a\xd2\x3d\xc5\xba\x9d\xdf\x54\x8b\x97\xb6\x63\x12\xc6\x44\xff\x48\x7b\xd7\x9a\xd8\x9a\xda\xca\x53\x74\x01\xb7\xeb\xe6\x44\x38\x24\x81\xa9\x64\x3e\x9f\x91\x6d\x01\xce\xff\xe5\x40\x42\x4b\x75\x58\x8a\x66\x64\x24\xc0\xf1\xcc\x37\xa4\x26\x5d\xc7\x39\xae\xe3\x55\xe0\x80\x71\x3c\xf7\x79\x27\xc8\x8c\x4c\x05\x04\xad\x2e
\x66\xeb\x49\xc5\x0d\x2f\x4c\xc7\x23\xdd\xe3\x2c\xf7\xb9\x5e\x6b\x32\x24\x37\x02\x75\xd9\x99\xb0\xd1\xc4\x3c\x3b\xdd\xb2\x58\x02\x4a\xe0\x48\xe7\x5f\x29\x5c\x8b\x95\xab\x91\x6d\x9d\xa8\x37\x28\x49\x1b\xf4\xc0\x01\xeb\x81\x4a\x4c\x83\x12\x96\x14\xf9\xb4\x6e\xc8\xad\xa5\x65\x6a\x38\x55\x50\xc4\x88\x94\x25\x65\x82\x52\x30\x83\x29\x4b\x66\x01\x47\x62\x85\xc1\x60\x29\xd1\x75\x4a\x44\x4a\x49\xdf\x01\xab\xfa\x09\x9b\x66\x54\x14\x94\xdc\x92\x6b\x53\xc5\xb5\xb6\xfd\x29\x5a\x5d\xd0\xd4\xd0\x83\xd4\x08\x74\xd8\x56\x41\x6d\xcf\x18\x52\x4e\x56\x8a\xa0\x40\x97\xf2\xc3\xa6\x84\xe8\x94\x90\x7f\xcb\x92\xe0\x1a\x93\xe9\x5e\x33\x5d\x10\x72\x47\x8e\xca\x84\xe8\x56\x17\x93\xa1\x21\x19\xba\xbc\xd5\x91\x02\x1e\xa5\x97\xd2\x9d\x0a\x26\x24\x3c\x17\xab\xf5\xe9\x53\x01\x78\xcd\x96\xf7\xe0\x81\x03\x92\xf6\x67\xe4\xc4\x4c\x32\xac\xe2\x21\x48\x4a\xbd\x02\x26\xae\x00\x75\x2d\xd0\x23\x04\x32\xea\xfe\xb7\xe5\x19\xd9\xeb\xd1\xce\x02\xbe\x0b\x76\x24\x41\x4a\xf6\x5d\x98\xc9\xf8\x74\x2d\xb3\x88\x22\x41\x8c\x40\x2e\x16\xf6\xf8\x09\xcf\xde\x5b\xee\x8e\x18\xb7\xef\x23\x3b\x5a\x11\xc2\x46\x25\x58\x0b\x51\x4a\x9a\x92\xc0\x0c\x43\xce\x66\x18\xd1\x85\x97\x29\xb8\x1c\xf6\x3b\x9d\x43\xdd\x47\x33\xc9\x90\xf1\x5a\x1b\x15\xd5\xbb\x25\x4f\x85\xf5\x9a\xb4\xba\xa8\x84\x16\x5d\xbb\x8c\x4a\xad\xf9\xaa\xea\xaf\xab\x17\x27\x5c\x8a\xf5\x5b\xed\x79\xfd\x1d\xcb\x45\x6f\x2a\x5c\x54\xbe\xea\xe5\x8e\x5c\x0a\x58\x1f\x07\xa4\xc1\x5e\x02\x96\x3b\x57\x28\xbc\x13\xec\xb9\x84\x0f\x62\x5d\xd0\xc8\x8c\xbc\x13\xab\x76\xd8\xbe\x0b\xa2\x30\xea\xc6\xa2\x79\x55\x21\x5f\x63\xf0\xd9\x6a\x14\xea\x57\x50\xcf\xd2\x7b\x7f\xe9\xc2\x4c\x61\x5b\xc5\x99\x60\xc7\x12\x5e\xaf\x5d\x14\x2e\x25\x41\x8c\x67\xc2\xb6\x91\xda\x5b\x06\x36\x78\x9a\x3e\xac\xac\xbe\xb3\x00\xa2\xd8\x8c\xbc\x11\xf0\x3a\x45\x05\xd8\xaf\xaf\x04\x89\x34\x85\xef\x82\x9c\x51\x48\xad\x52\xb1\xa1\x02\x64\xee\x05\x85\x63\xb9\xde\xbb\xf2\x41\xa4\xcb\x57\xef\xa3\x74\x4f\x79\x7c\xcd\xee\x7d\x4f\x4a\x08\x3c\x2e\x8d\x92\x2e\x41\x78\xc7\x12\x86\xf9\xb9\xe4\x22\xa8\x48\x94\xf4\x07\xfe\xce\xdb\xea
\x82\xff\xdd\x73\x1c\xf0\xaf\x2b\xd9\x4e\x8f\x2b\x70\xc2\x9b\x46\xe0\x7f\xab\x40\x7c\xad\x40\x9c\x7a\xc7\x82\x38\x0e\x05\xfe\x35\x7b\xba\xf1\xee\xf9\x77\xef\x48\x03\xbf\x36\x7f\x83\x5b\x4f\x02\x9f\xe2\x0b\x85\x7f\x7f\xe2\xdf\x23\x9c\xf4\x71\x5e\xfe\x2c\x7b\xf2\x07\xe6\xbb\xff\x39\xfb\xfd\xc6\xdb\xea\x96\xb3\xf7\x95\xea\x27\x33\x72\x1b\xa1\xf8\x46\xa9\x72\x17\x41\x17\x24\xf5\x24\x6d\x39\x0f\xf8\x34\x7c\x70\xb3\x53\xba\x1f\xf0\x99\xd8\x98\x06\xf6\x65\xf5\xf3\x7e\xed\xf3\x0b\xb1\xca\xcd\x56\x64\x97\xab\x7e\xde\xad\x7d\xfe\x2c\x36\xe6\x98\xfd\x52\xfd\xbc\x57\xfb\xfc\x64\x73\xdd\x3f\x36\xd7\xfd\x69\x33\xf2\xb7\x9b\xdb\xfd\xfb\x66\xca\xff\xd8\xdc\xa9\x32\xd9\x48\xb9\x4a\x36\x96\xd6\xc9\x46\xca\x45\xb2\x91\xf2\x24\xd9\xd8\x2d\xbc\x46\x5a\xbd\x65\xc1\xe6\xe2\x51\x52\xde\x2b\x47\x23\xc3\xb6\x0e\x56\xae\x93\xef\x23\x63\xa4\xcc\xc8\xef\x51\xea\xf6\xb4\x4e\xa1\x02\x5f\x98\x6c\xbe\xd9\xf8\x8e\x7c\x46\x14\x76\xff\xb8\x48\xef\x91\x54\x12\x8e\x3e\x89\xea\xfb\x92\x23\x32\x41\xb1\xa4\x69\xe6\xb3\xbb\x13\xa5\x3d\x69\xe5\x6e\xf7\xa3\x84\xfc\xe3\xb5\xb4\x17\x4b\x84\x52\x8b\x91\x50\x5e\xc3\xf9\x47\x4b\xb6\xfe\xe1\xfc\x83\x7a\x41\x88\xf1\x06\xd9\xf2\x13\x25\x24\xcd\xf6\x2d\x06\xd6\x35\xa3\x69\x8b\x38\x8d\x41\x38\x0a\x75\x0c\x98\x46\x7f\x14\x69\xfb\x09\xeb\x36\xb2\x0f\x92\x90\xe8\x90\x28\xdc\x91\x2c\x52\x92\x26\x65\xe3\xa7\x74\x13\x70\x42\x64\x71\xd0\x96\xe3\xc9\x0b\x1f\x02\xb6\x85\xca\x6e\x6f\x45\x2f\xd9\xa6\xdd\x90\x69\x08\x09\x48\x0c\x28\xf0\x5f\x82\x74\xf9\x09\x6e\x9b\xe0\x5e\x26\xc7\x1b\x9f\xec\xad\x8e\x41\x7e\x00\x8e\x68\x68\x77\x8d\xf4\xa8\x0c\x11\xa7\xd4\xc3\x2e\x0f\xa0\x03\xf7\xfc\x04\x1d\x21\x45\x6a\x04\xdf\x58\x38\x2f\x3d\x61\x46\xdc\xe5\x8b\xf2\x88\x8c\xcb\x8c\x31\x0a\xc9\x8c\x5c\x85\x36\x0b\xec\x23\xe4\x8f\x72\xfb\x07\x65\x58\xd3\xb1\xd9\x30\x0c\xf8\x9d\x67\x7b\xb0\xb2\x43\x77\x53\xee\x2f\x32\x23\xb3\x10\xf6\x40\xd2\xf9\x7c\x0b\x9f\xbb\x9d\x0e\x48\xf4\x22\x9a\x0f\xf8\xa3\x54\x78\xbb\x54\x98\x10\xd9\x66\x5d\xfa\x60\x6f\xde\xa1\x6d\x22\x1f\x74\x3b\x9d\x79\x87\xb6\x88\x7c\xb0\x87
\x4f\xa5\xd4\x3a\x7f\xc9\x97\xd7\xa6\x79\x6f\x23\x8c\x7e\x2d\xb2\x6d\x24\x15\x98\xe7\x61\x65\x40\xfc\x9b\x72\x7e\xf8\xa4\x32\x53\x9e\x86\x70\x1a\x56\xb6\x16\x47\xc9\x2f\xee\x4b\xde\x91\x57\x21\x26\xe4\xb7\x57\x06\x18\xc3\x33\xaa\x60\x9a\x95\x31\xc9\xbe\xa3\x55\x22\x1c\xcf\x19\xf2\x71\x2c\x4a\x2b\xc5\x5d\x05\xcc\xdd\x9e\xcf\x1d\x67\x2b\xbd\x7f\xda\xac\x5a\xc5\xb1\xf0\x32\xe0\x7b\x33\xd6\x97\x11\x7c\x0e\xe1\x54\x13\xd9\x72\x98\x53\x61\xf6\xeb\x64\xed\x0e\xa5\x73\xe9\xd8\x14\xd5\xb5\xad\xca\xa3\x0a\x21\xac\x60\xa5\xc7\x96\x95\xaa\x13\x83\xc9\x4a\xf8\xca\xbf\x60\x56\xcc\xe7\x9d\x43\x76\x47\x64\xb8\x4a\xa8\xe9\x0a\xed\x0b\xc0\xbb\x10\xfe\xcb\x13\xa9\x97\x47\x7d\x95\x32\xb4\x57\x39\xe4\x93\xe9\x04\xa7\x7c\x4f\xef\x69\xb2\x71\x91\x7a\xbe\x79\x99\xf9\x56\x9e\x18\xd2\xdd\xee\x9f\x26\xde\xf3\x84\xda\x1b\x1c\x8a\xd3\x82\x9b\xeb\x78\xba\xb9\x8e\xcb\x6a\x13\xd4\x10\x24\xc8\x61\x39\xe1\x7a\x15\x20\x1b\x68\x8b\xa7\x04\xf7\xae\x0c\x17\x27\xe4\x4d\x52\x15\x11\x1f\xca\xf3\xf5\x28\x24\xc7\xe4\x7c\x46\x06\x21\x04\x21\xf9\x9e\x90\x0e\xa5\xf0\x2e\x21\x27\x09\xee\x93\x00\x7e\xc9\x3e\x3f\xc5\x97\x70\x99\x90\xe5\xc1\x76\xfe\xfc\xd3\x8e\xf2\x3a\x80\xad\xce\x22\xc3\xf7\x0b\xa8\xec\x39\xc5\xfc\x1a\x60\x7a\x51\x6e\xc1\xab\xda\xec\x4b\x1d\x22\xd5\xd1\x38\xab\x02\x61\x48\x90\x4c\x63\xe9\x8b\x94\xe4\x35\xa0\xe2\xcb\xc7\xa4\xbc\x87\xe5\x7f\x2b\x42\xc7\x82\x57\x2d\x63\x20\x0f\xc1\x71\x60\x46\xde\x87\x90\x68\x38\x4b\x8c\x14\x1f\x02\x4e\xef\xd7\x09\x7c\x32\x75\x7d\xa4\x14\x7e\x84\x66\x71\xc3\x65\xb2\xa5\xcc\x3f\x25\x45\xba\x5a\xc3\x4f\xf4\x2a\x7d\xc1\xdd\xa3\x13\xcc\x14\x76\x8a\x93\xf7\x18\x33\x8b\xbf\xc2\xfd\x01\xce\x41\x2e\x6f\x2b\x4d\x86\x39\x3d\x01\x4d\xe3\x0a\xb7\x8b\x7b\xce\x64\x39\x8d\x38\x9a\x21\x48\xe5\xc7\xc4\xfe\x7b\xad\xe1\x4b\x48\x9c\x61\x38\xd6\x42\x19\x79\x94\x6e\x2d\x45\x6c\x46\x5e\x84\xab\x75\x95\xdb\x24\x13\xa2\x74\x01\xaf\x88\x84\x63\x72\x7e\x49\x9c\x38\x1c\x0b\x19\x88\x81\x83\x3e\x84\xc4\x52\xb5\x85\xee\x0c\x33\xd6\xc4\x09\xe5\x55\xe8\x87\x7a\x19\x42\x64\x10\x93\x64\xc5\xd7\x24\xfb
\xca\x03\x8d\x69\x9d\xca\x9f\x3b\x46\x80\xd8\xcf\x4a\x04\x22\xbc\x11\xca\x81\xbb\x94\xbd\x88\x33\x52\x51\x32\x75\x20\x32\x1c\x94\xeb\x05\x9d\x43\xdc\x2c\xea\x3b\xfd\x6c\x0f\xa5\x69\x40\x3c\xa7\x24\xe2\xbf\x26\xa9\x7e\xd4\xb6\xc7\xe7\xb6\xe7\xf3\xf4\x69\x50\x7a\x16\x45\x4f\xcb\x5e\x01\x2c\x2c\x90\x7d\xe0\xc5\x28\x0f\x70\x90\x05\x64\xdb\xd3\x1a\xd3\xc7\x6b\x4c\x1f\xaf\x5d\x51\x38\xa7\x2e\x43\x63\xa8\xa0\x3c\x0e\x00\x7f\xda\xe3\xcd\x18\x13\x82\xc9\x62\x04\xcd\xde\x67\x49\x46\xf3\x94\xd6\x58\x51\x98\x56\x14\x62\x45\xa1\x1b\x80\x66\x84\xb3\xd0\x1d\x50\x53\x9e\x71\x57\x40\xc0\xc2\x5a\xa5\x1d\xe0\xae\x0f\x3c\xab\x74\x89\x86\xc8\xf5\x21\x72\x03\x88\xdc\x01\x44\x86\x06\x4d\x33\x48\x43\x86\x42\x32\x0a\x4b\xec\xbf\xd2\x87\x83\xac\x0f\x07\xcb\x7d\x68\xba\x40\x60\x03\x13\xbc\x32\x97\x33\xe1\x06\x10\x30\x61\xc8\x62\x62\xa9\x27\x43\x5c\x05\x87\x6c\x45\x6f\x6a\x5c\xdd\x8a\xde\xcc\x12\x0c\xd8\xde\x2c\x17\x14\x8c\xe0\x75\x58\x14\xab\x25\x66\x5e\x09\x8a\x95\x4b\xac\x5c\x62\xe5\x72\xa9\x47\xb3\x60\x99\x94\x16\xa2\xcc\x80\xd3\x95\x83\xd9\x85\x10\x86\x20\x96\x68\x29\xf5\xe9\xb3\x25\x5d\xe7\xcb\x10\x26\x55\x2d\xe6\x65\x15\x66\xe2\xe7\x9b\xf4\x85\xb5\x5a\xd3\xa9\x7c\xb8\x0f\x84\x31\xfa\x83\x8f\xc6\x9a\xae\xc0\xbe\xaf\xc2\x7e\x43\x2d\x21\x32\x0d\xc1\x7f\x8d\xda\x31\x1d\xc2\xa5\x0f\x2f\x12\xe2\x9c\xb7\xcf\xff\xfc\xf3\xe2\x7e\x41\xe8\x6f\xad\xbe\x0b\x7f\xfe\xf9\xe7\x9f\xff\x63\x7b\xfe\x6f\x7f\xfe\x19\x5f\x38\x94\xc2\x1d\x79\xe3\xa3\xdf\xf5\x32\x5a\x7d\x18\x27\x78\xbb\x30\xda\x91\x59\x04\xa8\x75\x8b\xd4\x0d\xa1\xcf\x75\xc9\xee\xdc\x67\xd3\x18\x30\x76\xed\x8d\x30\xd2\x4d\x52\xda\x72\x16\x4e\x65\x52\x7f\xd9\x6c\xb2\x3d\xa9\xad\xe0\xf5\x25\xfc\x47\xcd\x22\xac\xaf\xe1\x9f\xaa\xdf\x1f\xd7\x0d\xe9\xea\xe7\x47\x75\x43\xfa\xd7\xb5\xd8\x5b\x59\xd5\x62\xef\xab\x03\xf6\x47\x19\xd3\xef\x09\xb1\xe2\x59\xf2\x89\x70\xe0\xcc\x27\x66\xd4\xac\x74\xb4\x69\x9d\xd2\x77\xdf\x52\x21\x1d\x1f\x8b\x91\xb8\x75\xe0\xad\x59\x48\xfc\xab\xec\xed\xf3\x1f\x09\x1f\x9b\xee\x9d\x0e\xe1\x99\x6f\x97\xbe\xb7\x43\x43\xc3\x88\xd6\x16
\x68\xc9\x57\x87\xc3\x29\x5e\x2c\x79\x33\xf2\xd2\x30\x42\x77\x6f\xaf\x43\x5b\x0f\xbb\x8f\xf7\x0e\x1e\x19\x09\xa5\x0e\x3b\x7d\xd5\xee\xee\x1d\x74\x1e\x1f\x78\x8a\x3e\xc0\xa7\x87\xf3\x8e\x99\xa5\xf6\xf5\xc3\xdf\xb4\x99\x7e\xa2\x4d\x04\x7e\x45\xb3\x44\x3c\xd8\x3d\xd8\xdf\xb1\xc6\x8a\x7d\xfd\xf8\x60\xde\xa1\xd4\xbc\x9e\x67\xd1\xb9\xf7\xfc\x77\x8f\x48\x26\xda\x64\xf7\x60\xff\xb7\xa4\x45\x92\xd4\xbc\x49\x52\xf3\x86\xd2\x36\x21\xdd\xfd\xdd\xdf\x88\x62\x64\xff\x37\xd9\xda\xa1\x0f\xba\xfb\xbb\xa6\x86\x1d\xfa\x60\xdf\xfc\xdb\x05\x3e\xf5\x04\x53\x2d\xa2\x0e\xbb\x9d\xfe\xae\xd7\x7e\x4c\xc1\xdf\xf5\x92\xd6\x5e\xa7\xf3\x9b\x6e\x91\x9d\x43\xd1\xef\x78\xdd\xb2\xa9\xa3\x79\x65\xfe\xc8\x18\xf6\x60\x46\x74\x0c\x22\x46\x2e\x25\x4e\xdb\x30\xb0\x8c\x61\x07\x56\x6d\xd6\xd5\xb7\xea\xba\xb5\xad\xba\x9d\xda\x56\xdd\x6e\x2d\x33\xc3\x5e\x2d\x8f\xc3\x7e\xed\x38\xfa\x41\xf5\xf6\xc3\xc6\xc3\xda\xcd\x83\x8f\x6a\x89\x00\x1e\xd7\xae\xca\xea\x76\xea\x57\x5f\x75\xbb\xf5\xdd\xc2\xee\xce\x62\x41\x66\xe4\x93\x9f\x36\xba\xda\xea\x19\xf9\xec\x17\xbd\x71\x5a\x7a\xff\xa5\xf4\xde\x2b\xbd\xff\xb1\xe6\xbd\x2a\xf5\xaa\x9b\xbe\xdf\x85\x19\x79\x92\x57\x8c\xff\x6b\x39\xdf\x4a\x62\x41\xf0\x8a\x2a\x76\x65\x73\x98\x5a\x71\xef\x8f\xa8\x59\x86\x8a\x7d\x92\xfb\xe0\x95\x87\x72\xef\xa3\xb7\xa5\x9a\x4d\xbc\xdc\x7c\x3e\xd7\x7d\x7c\xde\xf1\x76\xbd\x2e\x3a\x44\x5d\xff\x5b\xd9\xd5\x54\x99\x11\x87\x9d\x54\x65\x1d\xc7\x64\xe5\x44\xaf\x07\x13\x49\x34\xc3\x54\x29\x36\xa8\x71\x49\xd4\x03\x31\xef\xf4\x65\x8b\x7c\xd3\xf6\x99\xb6\x88\x6e\x39\x0d\x87\x52\x0f\x77\xd9\x42\xbb\x7d\xb0\xc0\x09\xec\x00\x0f\x8d\x21\x00\x83\x98\xe2\x49\x8f\xc2\xcf\x55\xa2\x6d\x2b\x77\x19\x34\x9b\x5b\x65\x9f\x41\xe6\x41\xb0\x4e\x83\x92\x13\xac\xdc\x30\x33\xad\xac\xdf\xc0\xa8\xc8\x93\x18\x24\xec\xd1\x76\xfa\xd4\xed\x74\x68\x2b\x7b\xdb\xe9\x94\x29\x88\xf8\xff\x1f\xd9\xbd\xf0\xdc\x95\x9b\x87\x71\x0f\x5d\xc3\x89\xf7\xd5\x66\x75\x6a\xcd\xea\xd6\x9a\xb5\x53\x6b\xd6\x6e\xad\x59\x7b\xb5\x66\xed\xd7\x9a\x75\x50\x6b\xd6\xc3\x5a\xb3\x1e\xd5\x5b\xf5\xb8\x7e\x55\x5d\xb7\xb3
\xd4\xcc\xb2\xdf\x7d\x58\x9a\x45\x44\xb0\x19\x99\xe1\xb0\xa3\xcc\xc6\xdb\x9a\xa4\x99\x51\xf8\xda\xe8\x6c\x28\xb0\xf1\xbd\xce\xc1\x8d\xaa\xd4\xdd\x3b\xe8\xe2\x6b\x51\x81\xcd\x95\x77\x23\x67\x55\xab\xdb\xe9\xfc\x26\x5b\x7b\xbf\xe9\x96\x70\x79\x8b\x08\xd7\xef\x77\xbd\xb2\x5b\xca\x2f\x53\xa3\x98\x21\x8e\x29\x26\x29\xdc\x73\xed\xc9\x76\xc0\x89\x42\xc1\xad\xca\x01\xd3\xa3\xd8\xe8\x85\xbb\xa8\x1e\x72\x5d\xc2\x16\x97\xd9\xfc\xd1\xc1\x9e\x40\x46\x7f\xd8\x7d\xdc\x3d\x28\xdf\x13\x33\xe6\x95\x25\xfc\x4d\x0c\xdd\x5d\x2b\x77\x4a\x2e\x3d\x5e\x8f\x09\xef\x4f\x34\x71\x4e\xae\xa2\x64\x3c\xc0\x1b\x7c\x7d\xd1\x10\x93\xa9\xbe\x73\xa8\x37\x23\xaf\x63\xd8\xd6\xc4\xf9\x5d\x45\x72\xd4\x78\x7d\xf2\xf1\xd1\x41\xa7\xdb\x18\x46\x6a\xc2\xb5\x43\x61\x5a\x33\xee\x6f\xca\x04\xbc\x23\xd7\x11\xdc\x1f\x1b\x41\xb3\xd5\xa1\x70\x92\x3d\x9c\x66\x0f\x5f\xb2\x87\x57\xe6\xe1\xb3\x31\xa9\xde\x08\x10\x1c\xe5\x64\xd9\xa7\xb0\x5d\x46\x1b\xa0\xdc\x92\x94\x6e\xa2\xbb\xea\xa6\x9a\x54\x04\x6c\xf0\x0a\xcd\xa0\x8f\x56\xb8\x7d\xab\x86\x4a\xa6\xd3\x44\xd7\xa5\xc0\x25\xd9\xea\xc2\x56\xa7\x9e\x1e\xd4\xbe\xee\xd6\xd3\x08\x5d\x92\xad\x0e\x42\xd7\x18\xd6\xbe\x37\xab\x33\xa1\x98\x97\x31\x97\xea\xfe\xc8\xf4\x41\xe2\xfa\x14\xfc\x2b\x4f\x82\x1f\x79\xca\x08\x72\x51\xf6\x59\x56\xd7\x89\x2c\xb7\x56\xea\xfb\xf3\xbf\x63\x87\x7c\x1a\x0b\x43\x4a\x10\x4d\xa6\x63\xa1\x45\x83\x0f\x06\xa1\x1c\x61\x08\x1e\x9e\x9e\x32\x56\xb6\xa7\x5c\xbf\x3f\xd3\xb6\xbf\x27\x78\x3f\xa8\x67\xca\xbe\xb7\x00\x71\x83\x2b\x81\x99\x32\x42\x25\x06\x65\x3f\xd6\xb4\xca\x60\xbf\x9b\x45\x4f\xc4\x70\x4b\x7e\xc4\xd0\xed\x1a\x2b\x2c\x06\xe4\xbb\x9d\x6c\xb9\x2b\x79\x49\x97\x99\xb3\x5b\x67\xce\x19\xdf\xb8\x1d\x73\x57\xed\x00\x6b\xce\xf2\xaf\x76\x20\x3f\xa3\xd7\x82\x9f\xa1\x41\x14\xa3\x41\x74\xd3\x2b\xb6\x01\x67\xe4\x6a\x0c\xdb\x46\x69\xe5\xa7\xb8\x1b\x98\xbd\x30\x26\xa4\x17\x40\x6c\x5f\xdc\x8c\x81\xe3\x66\x1f\xfe\x7a\x1f\x9b\xa9\xfd\x8d\x42\x82\xdb\x7e\x0a\xfc\xcf\xf6\xc3\x00\x23\xb1\x6a\x3b\x7f\xb7\xe5\x16\x1a\x2b\x92\x48\x86\x63\x46\xdd\xed\xfe\x64\xec\x99\xd6\x55\x9c\x44
\xd7\x7c\xe3\xfe\xd1\xd1\xe6\xde\x38\x29\x57\x77\x35\x24\x47\xe3\xea\x64\x3c\xdd\x5c\xfc\x39\xdf\xec\x74\xdc\xfc\xf9\x7b\x45\xa3\xa8\x26\xd8\x29\x39\x1d\xab\x52\x10\x95\xf7\x33\xab\xbb\xcf\xc8\xfb\xb1\x8d\xb1\xc8\x6d\xb7\x3f\xff\xec\x5b\x0f\x90\xcb\x0f\x28\xa5\xae\xdf\xbf\xc4\x54\xf0\xca\xf5\xad\x03\x2d\x0b\x38\x55\xae\x8f\x31\x19\xd4\xbb\x24\xce\x03\x07\x8e\xf2\xf3\x47\x58\xc9\xf1\x18\x5e\x8c\xe1\x1d\x91\x70\xcf\x0f\x70\x2f\x37\xf1\x0c\x9e\xe0\xd4\x30\xfe\x82\xe2\x78\x7c\x8b\xcc\xbb\x92\x03\xb4\x44\x2a\x92\xdd\x5b\xca\x9c\x96\xad\x7d\x99\x6d\x66\xca\x57\x57\xc1\xd4\xac\x2a\x3e\xec\x78\x15\x73\xac\xf8\xb0\xeb\x95\x2f\xe9\x2f\x7f\xe9\x14\x5f\x0e\x2a\x1f\xf2\xa5\xf5\xc9\xb8\xb6\x96\x7e\x19\xd7\x16\xdb\x1f\xe3\xba\xe4\xf9\x3c\x2e\x0d\xde\x1b\xbe\x71\x63\xf2\x1d\xdf\x6c\x87\x7e\xa8\x7e\x7f\x58\xfb\xfc\x8a\x6f\xb4\x33\xcf\xaa\x9f\x0f\x6a\x9f\x5f\x97\xc6\xe1\xa3\x2e\xed\x04\x76\xd3\x6b\x4d\xa4\x67\xcc\x23\x54\x49\xa9\x6b\x0c\xc1\x4f\x21\x44\x98\xd0\x99\x56\xef\xf5\xfd\x58\xb5\x7b\xf4\x00\x8e\xc9\xb9\x1c\x10\x87\x8f\x85\xd2\x0d\xfc\xdb\x9e\x71\x25\x43\x39\x72\xe8\x05\x35\x9f\x93\x01\x41\x02\xaa\x06\xe5\x71\x19\x93\x4a\x88\x36\x4b\x95\x3f\xc0\xb0\x4e\x54\xc7\x39\x7c\xf4\x3a\xa9\xde\xbd\xa8\x48\xbd\xaf\x55\x22\x06\x03\xc8\xee\x9d\x88\x45\x75\xba\xfe\xac\x42\xfe\x31\x06\xe7\x4a\x89\xa1\x03\x0f\xfe\xc7\x77\x7e\xc3\xed\xfd\x2f\xde\x83\xd0\xd5\x22\xd6\x84\x48\x26\x69\x7e\xda\xf3\xc1\x9f\xf1\x83\x11\x38\x0e\xa5\x98\x74\xb8\xec\xaf\xa9\x62\x7d\x23\xea\xfb\xaf\x25\x67\x6d\x62\x7b\xb1\xee\xb2\x9d\x62\xc7\x21\x79\x70\x3b\x20\xce\xa5\x3f\xe6\xf2\xba\xd4\x63\x12\x7b\x0b\x5e\x9b\x09\x3a\x1b\xc0\x1d\x26\x73\x3b\xab\x74\xc3\xcb\x8a\xe4\x36\xba\x98\x2c\x07\x30\x9d\x0c\xe0\x0c\x70\x27\xe0\x28\x1f\x25\x2d\x6e\x75\x5b\x46\x33\xc5\xa7\xa5\xba\x54\xcb\xf1\xcc\x4f\x0a\x33\x72\x9d\xc3\xce\xda\xdd\x4e\x07\xa1\x9e\x2d\x8f\xde\x0b\xbe\x29\x22\x6c\x3e\x5f\x19\x00\x36\xcd\x71\xfb\x5a\x36\x7c\x2d\xdb\x51\xa2\xc7\xa1\x14\xed\x50\x0e\xa3\x86\x1f\xa9\x81\x50\xed\x8e\x43\x01\x3b\xc6\xd2\x37\x23\x71\x5e\x6c
\xc8\x1b\x43\xde\xc6\x12\xc1\x15\x57\xba\x31\x51\xed\x1d\x24\xf1\x8c\x42\x32\x20\xce\x49\x94\xa8\x40\x60\x5b\x3c\xf3\xbb\xbc\xce\xbe\xdf\xdc\x5d\xba\x4c\x5c\x1b\x7d\xcc\x88\xbe\x31\xf1\xd3\x3a\x90\x18\xb1\xd4\x86\x78\x92\x51\xae\xc2\xd1\x95\x6e\x77\x1a\xd8\xcb\xd6\xed\x6d\xba\xf4\x66\x00\x4e\x12\x0b\xd5\x8e\xc5\x58\x04\xda\x01\x27\x94\xa1\x0e\xf9\x38\xff\xda\x9e\x44\x3f\xdb\x7f\x01\x32\x13\xfe\x75\xa8\xff\x02\x2a\x25\x24\x88\xc6\x91\x72\xc0\xf9\xb7\x20\x08\x2a\xe3\xfc\x0f\x96\x6f\xf1\xdb\xe1\x1e\xae\x6c\xce\xa8\x3d\xe4\x03\x31\xa8\x0c\x51\x2c\x82\x48\x0e\xb8\xba\x73\x28\x7c\xe5\xe4\x98\x93\x4b\x8c\x02\xa3\x14\x26\x03\xe2\xbc\xc0\xcd\x86\x86\x7f\xd7\xd0\x57\x61\xdc\x18\x73\x5f\x8c\x4b\x55\x3b\x2d\x1c\x94\x0a\x0f\x7d\x2e\x6b\xcd\xff\xf6\x20\xdd\x6a\x88\x1f\x48\x31\xeb\xdb\xbd\x0b\xe6\xb4\x3e\x87\xe4\x73\x4d\x21\xfe\x52\x1a\xc8\xb3\x84\x48\xd7\x3f\x71\xfd\x8f\x95\xe8\x86\xbf\xc9\x6b\x9f\x39\x59\x9a\xc6\xb9\x52\xf5\xc4\x6e\x2f\x77\xd2\xbd\x11\x5f\x9b\x1a\x71\x0f\x68\x1d\x83\xfa\x62\x3c\x6e\xc7\x63\x1e\x5f\xb5\xa3\x65\x16\xb5\xcd\xb4\x3c\x6a\xcf\x02\xc1\x26\x72\x07\x5c\x8e\x4c\xc7\x56\x08\x2e\x77\x97\xd3\x52\xbf\x40\xc9\x3a\x3a\x06\x48\x48\xa9\x77\x9f\x54\x67\xf6\xf7\x41\xed\xcc\xef\x8f\xea\x77\x51\x5a\x01\xc2\x91\x34\xac\x39\x6c\x07\x42\x1a\x66\xc8\xab\xb4\x3c\x70\x69\x44\xa1\xff\xb9\xc6\x05\x9f\x56\x08\xf3\x2b\xd3\x66\x07\xf0\xb6\x2c\x09\xc7\x83\x32\xfc\xdb\xcd\x1a\xd8\xef\x7c\xa3\x3b\xf8\x8f\xd5\x56\x8b\x2c\x6f\x08\xa6\x76\xc3\x96\x9e\xcf\xf1\xe4\x2c\x5a\x0f\x3b\x36\x71\x3d\xde\xb0\x94\x1b\x12\x95\xa8\xa7\xa0\xd4\x8e\x55\x0c\x55\xf1\x45\x49\xd7\x1f\xa5\x2e\x29\x5d\xf5\x45\x65\x1c\xe7\x47\xb0\xce\x17\x85\x8e\xdd\x45\x35\xd0\x43\x95\xab\xbf\x24\x12\xd3\x7b\x14\xee\xca\xa0\xba\x99\x3e\x30\xa2\x63\x9a\xe8\xb4\x8f\x55\x90\xf5\xf5\xdb\x41\x85\x19\x44\xb0\x34\x38\xd7\xe2\x6e\x10\xcd\x64\x3e\x3a\xbf\x57\x46\x27\x59\x59\x20\x99\xae\x01\xe7\xc1\x1a\x75\x22\x88\xc6\x8d\x20\x1a\xb7\x79\xa2\xa3\x42\xf8\xfe\xa2\x8c\x1e\x6e\x9c\xfb\x56\x7e\xcd\xc8\xcb\x01\x6c\x61\xb4\x4c\xce\xa0\xb8
\x41\xbc\x56\x30\x56\x27\x64\x19\x4b\xb7\x82\xc5\xf9\x7f\xfe\xef\x4c\xd6\x55\x18\x3d\x08\x36\x6a\x78\x51\xb0\x51\x7f\x0c\x97\x3b\x76\x12\x25\xb1\xc0\x99\xb6\xac\xf8\x0c\xd7\x80\x8f\x05\xbf\x11\xcb\xe0\x7e\xb0\x51\x7f\x8c\x83\x8d\x36\xd5\x38\xd8\x38\xe7\x06\xc1\xc6\x19\x7b\xb3\x4c\xaa\x3f\x4e\x56\xb4\x69\xfb\x7f\x23\xb3\x04\x37\x75\x66\x91\x7f\x93\x51\x0c\x86\x5f\x63\x94\xc9\xe6\xee\xbc\xda\xcc\x28\xd3\xcd\x63\x35\x0a\x36\x46\xb4\xcc\x36\x33\xc2\xdd\x66\xe4\xb7\xc1\x46\xfb\xf6\xba\x56\x77\xfd\xfb\x51\xf0\xbf\xd6\x67\x5c\x33\xe7\x1e\x96\xfd\x00\x41\xdd\xb1\xb2\x53\xf7\xab\x9c\xae\x63\xc7\x01\xd7\xa2\xa2\x03\xd6\x94\xfa\xc2\x5b\x10\xe4\xeb\x02\x1e\x1d\x9c\x0e\x61\x18\x92\x0e\x1a\xeb\x53\xba\x4a\x35\x0d\xf8\x58\x18\x25\xec\xb2\x31\x89\xa4\xbe\xca\x50\x13\xc5\x34\x94\xe1\x54\x34\xb3\x10\xed\x2b\xcc\x7e\xb4\x6a\x56\x4c\x95\xb8\x69\x23\x50\x63\xd0\x1e\x8e\xc5\x6d\xba\x6c\x5b\x86\xfd\x79\x93\x17\x79\x79\x93\x2f\xf1\x4a\x45\x33\x67\xad\xb6\xc1\xe5\x68\x2c\xda\x63\x31\xd4\xe6\xd7\xee\x6d\x23\x48\x54\x1c\xa9\xf6\x34\x0a\x2d\x62\xd4\x3e\x2e\x32\x11\x5b\x90\x62\x49\x35\x5d\x56\x27\x25\xef\xba\x6f\xc6\x10\xb2\xfb\x6b\x69\x14\xd3\xb3\x1b\x38\x4b\x95\x99\x5f\xd8\x61\x73\xde\x70\x99\x18\xed\xb5\xca\x45\xce\x0b\xe1\xab\xd2\xfb\x8c\x9b\x9c\xf7\x5c\x05\x57\x4e\x95\xa5\x9c\xa3\xa9\x0a\xc7\x4e\x95\xaf\x9c\xf7\x3c\x2b\xbc\x9f\xd7\x95\x48\xe1\x54\x1d\x0c\xce\x9b\x64\x9c\xc1\x3d\xcc\xf1\x25\xa3\x24\xd6\x4e\xd5\xf3\xe0\x9c\x88\xa9\x16\x13\x5f\x28\xa7\xea\xce\x77\x3e\x06\x3a\x2a\x5e\xe7\x5e\x7d\xe7\x43\x74\x93\xc2\x57\x39\xda\x79\x26\x02\xfb\xa1\xb4\x51\xa7\xe8\x72\xff\x4b\x64\xd6\x75\xac\xf0\xf5\x3f\xcd\x0a\x68\x10\xfd\x35\x2f\x5c\xd8\x01\x3d\xbe\xc1\xac\x1e\x95\x29\xf2\xad\x3a\xc9\x42\x05\xce\x84\xdf\xda\xf3\x77\x0e\xd4\x43\x65\xbf\x07\x15\x07\xda\x9b\x3e\x36\xf2\xac\xce\xf9\x93\x68\xc0\xc7\x0d\x63\xe2\x34\xe2\x2b\xd3\x8a\xd4\x7a\x1a\x84\xf1\x74\xcc\xef\x1c\xb3\x00\x45\xc1\xf5\xaa\x49\x83\x45\xdb\x83\x90\x8f\xa3\x51\xa3\xfc\x23\xed\xb1\x62\xba\x2f\x97\x0a\x6c\x02\xb2\xf5\x00\xf5\xb9\x5a\x2c
\x2d\xc1\x38\x8a\x45\x63\x92\x2d\x71\x66\x54\x6e\x03\xf2\xfc\xa6\xbc\x9a\xdc\x3a\xab\xa7\x96\xc1\x6c\x73\x41\x67\x78\x83\x01\x5c\x07\x80\xd1\x2e\x37\x15\x60\x34\xf9\x1b\x86\x4e\x1e\xca\x82\x12\x94\x53\x15\xc0\xa9\x6e\xef\x35\x8c\x90\xf9\x9e\xc4\x3a\x1c\xde\x65\x6d\xab\xcd\xdb\x19\x79\x62\xc6\xb4\x63\x0a\xe3\x53\x37\x1b\xee\x35\x74\x0e\xa3\x48\xaf\xee\x81\xc9\xb8\xbd\xd3\xa8\xaf\xb0\x71\x12\x04\x22\x8e\xcd\xb2\xbe\xa1\x63\x9e\x72\x19\x58\x63\xb4\xba\x5e\x57\x50\x4e\x55\x38\xc9\x8d\xdb\xdb\x80\x7c\xab\xa0\x38\x11\xba\xf1\x8c\x6b\xf1\xe0\x34\x9c\x88\xd2\xa2\xbd\xbe\xc3\x79\x70\x3d\x50\xd1\xb4\xcc\x65\x19\xc7\x7b\x19\xb8\xe5\xba\x60\x1c\x4e\x1d\x70\x94\x08\x34\xe9\xe0\xa5\x17\x1d\x9a\xb3\xe4\x34\x8a\x43\xbc\x10\x13\x9c\x61\x78\xbb\x81\xbb\xb0\xa2\xcc\xbe\xfb\x0b\x7a\x0a\x52\x4a\x5e\xe5\xcd\x2b\xfb\xe5\x66\x1d\xee\xcd\x66\xad\xe2\xdd\x66\xbd\xe0\xc3\xf2\x24\x57\xd1\x2c\x5e\x9e\xdf\xaf\x36\xe3\x39\x0b\xca\xb1\x28\xdf\x35\x1e\x12\x3a\x26\xe7\x25\x6b\xd9\x01\xdc\xe3\x70\xc4\x20\xd4\x8e\x19\x3a\x74\xa0\xff\x1d\x5f\x81\x2a\xb1\xc5\x73\x83\xc5\xf4\x63\x11\x4a\xea\x9f\xb8\xfe\x71\x5f\x79\x7f\x13\xeb\x8c\xcc\xb6\x0d\x69\xd7\x76\x2f\xa5\x54\xc7\xb1\xb0\x37\x2d\x38\xd5\xf1\x7a\x5d\x6a\x2b\x06\x36\xa8\x22\xb0\x41\x95\x3d\x8d\xdf\x35\x6c\x19\xa3\xb3\xd9\x54\x7d\x87\x39\x9e\x79\x9e\xcf\x55\xbf\x78\xf7\x1f\x8e\xe7\x6c\xe1\x1f\x86\xdd\x65\x23\xc0\x5c\xff\x5b\xa5\xc2\x8f\x9b\x2b\x04\xc5\xde\x0b\x72\xcd\x6b\x4e\x5c\x1b\x53\x91\xd5\xd5\x59\xaa\xbd\x1a\x5c\x51\x8a\xe7\x9c\x91\x1f\x37\x70\x8d\x89\x94\xb1\xa9\x65\xcf\x71\x90\xba\x59\xab\x39\x57\x1a\xff\xe7\x4c\xab\x22\xf9\x8b\xeb\xff\xb0\x1b\x6a\x6e\x60\x03\x3f\xb8\xcb\xf7\xd0\xdd\xc0\xdd\xe0\xa0\xa2\xe2\xfd\x7f\x76\x99\xba\xdb\xae\x7e\xc7\x9b\x03\x9c\x4d\x96\x12\x2e\x67\x56\xd4\xaa\x95\xf6\xd0\x5f\x2c\x61\xfa\x57\x56\x0f\xf1\x5f\x10\xd1\xa5\xbd\x86\xcd\xd6\xd6\xcf\x55\x26\x4b\xae\x5c\xa6\xc1\xcd\x35\xd5\x72\x2a\xe4\x20\x94\xa3\x25\x6d\x4d\xdc\x4e\x71\x6f\xb8\x84\xfe\x59\x55\x32\x3e\x5f\xc1\x25\xc5\x38\x7c\xdf\xae\xbf\x33\x9d\xfa\x81\x4f\x84
\xd7\xb0\xab\x1f\x6e\x46\xf8\xd1\x52\x87\xfc\x15\x8e\xa3\xc1\x40\x89\x38\xae\xa0\xe1\xaf\x96\x0c\xd7\x97\x41\xc5\xcf\x15\x59\x3f\xd7\x89\xdd\x4a\x8e\x7b\x65\x9f\x61\xda\x8c\x37\xdb\xf6\x09\x97\xe9\x71\x12\x6b\xa1\x1a\x27\x98\x2c\xd7\xd6\x54\x0a\x22\xc0\xc4\x25\xe5\x6d\x15\xc7\xb1\xf3\x0c\x77\xad\x56\x19\x4b\xc3\x48\x4d\x52\xbb\xbf\xa2\xab\x16\x6d\x0c\xa2\x71\x3b\x9e\x54\x9c\x94\xb6\xbf\x9c\xa5\x2e\x4a\x41\xbb\x9d\x3a\x63\x9b\x1e\x20\x55\xd8\xff\x5c\xc5\xb6\xdd\xbf\x52\x75\xe6\x75\x25\x92\xd9\x30\x50\x9f\x0f\x46\xc2\x81\xad\x4e\xa5\xc7\xd6\x87\x5d\xd8\x02\x99\xfa\xe4\xac\x8c\xc1\x48\x61\xb2\xbd\xbc\xd5\x91\x17\x29\x50\xea\xe9\x40\x9f\xe3\x82\x18\x2a\xe5\x20\xdd\x58\xce\xa2\x77\x13\x63\xa0\x5e\x0d\xe1\x75\x92\x46\xec\xe5\x0d\xff\x15\xe3\x4d\x09\x3e\x58\x32\xdd\x62\xa1\xcd\x7a\xba\x3c\x91\x06\x61\xcc\xfd\x31\xce\x24\xa2\xcb\x42\xa0\xca\x4e\x62\x0d\x3b\x89\x7f\x25\x3b\x7d\x12\x42\x15\x83\x7a\xb5\x66\x50\xb1\x8b\x9e\x05\x20\x33\x7e\xaa\x6e\xb6\x2d\x79\x1a\xd2\xe9\x33\xbb\x29\xa6\x4f\x79\xda\xfc\xd7\xc9\xfe\x32\xd5\xe1\x2f\xcf\x83\x0f\xb1\x11\x2b\x7f\xe4\xdd\x6c\xe4\x80\xcb\x8f\x69\x1a\x4e\xbe\x0d\xeb\xa6\xfc\x57\xa1\x62\x54\x66\xa4\x8d\x7e\x32\x2b\xf2\xbf\xa8\x01\x4f\x14\x97\xc1\xd5\x2f\x36\x40\xb9\xfc\xc3\xaa\x85\xe2\x3f\x59\x75\x12\x8e\x07\xc6\x5a\xf8\xf5\xda\x3f\xfe\x8b\x6b\xff\x12\x0b\xf5\xeb\xb5\x7f\xfa\xd7\xd5\xfe\x32\x4a\xc7\xf4\xd7\x6b\x7f\xfc\xaf\xab\xfd\x58\xdc\x84\x7f\xab\x72\xff\xc5\xbf\xae\xf2\xbf\xdb\x70\xff\xb2\xec\xfe\x40\x6b\x9c\x7f\x71\x7d\x05\xe5\x19\x5e\x59\x20\x23\x39\x0c\x47\x19\xfa\x0f\x39\x31\xd3\xf6\x5e\xb1\x01\xcc\x83\x6b\x43\xb9\x1c\x38\xe0\xfc\xdb\xf0\xe1\xf0\xe1\xf0\x71\xfe\x71\x18\x49\xdd\x1e\xf2\x49\x38\x36\xca\xe3\x24\x92\x51\x3c\xe5\x81\x28\x1a\xf8\xae\xa8\x4d\x96\x88\xbb\x58\x3e\xb0\xf1\xbe\xec\xba\xb4\xc7\x95\x98\x74\x83\xb3\x2c\xc0\x87\xe7\x01\x3e\x79\x40\x0d\xfa\x1d\xde\x70\xc0\x33\xab\xc1\x37\x58\xad\xf4\xca\xa8\x3d\x4a\xb4\x16\x2a\x2e\xc8\xfa\x88\x5f\xaf\x6f\x88\x33\x0c\xc5\x78\x10\x0b\x5d\xee\xf7\x17\xa1\x8a\x75\x63\xc0\xef\x1a\xd1
\x10\x43\xf2\x66\x42\x5c\xe7\xa3\x70\x8b\x1e\xab\xcf\x83\xe5\xb2\x77\xe4\xe8\x06\x9c\xf7\x91\x1c\x18\x5d\x7a\x2b\x71\x83\x5b\xe0\x9a\x82\x7d\x7f\x92\xd8\xf7\x5d\x7b\x4f\x5d\xe5\x13\xd7\x89\xc2\x8f\x3b\xa5\x8f\xb6\x3a\x79\x53\x61\x22\x74\x69\xae\x74\x05\x3b\x1f\x22\x2d\xbc\xc6\xe9\x55\x18\x37\xcc\x42\x16\xca\x51\xc3\x3c\xf2\x1b\x9b\x0d\x71\x1c\x05\x7c\xdc\x88\x75\xa4\xf8\x48\x98\x96\xdd\x45\x89\x6a\xf8\xc6\x2e\xb6\x2a\x6e\xee\x49\xa9\x05\x32\xcd\x48\x38\x80\x17\x01\x58\x53\xf4\xb4\x7e\x4f\xdd\xca\x1d\xc9\xf7\xd6\x46\x79\x63\x2d\x94\x67\x18\xf4\xe7\xbf\x5d\x0e\xd6\xda\xf5\x6c\x6e\x29\x55\x4e\x75\x74\xb9\x0d\x02\x34\x70\xd8\xda\x4a\xf2\x6b\xe1\xab\xda\x04\x1f\xd4\x55\x87\x8f\xdc\xe6\x17\x58\x10\xe9\x06\xdd\xfa\x6d\xf2\x33\x32\xba\x31\x0d\xd8\x07\xee\xf2\xfa\x95\x95\xda\x9a\x9e\x41\x87\xba\x7c\xdb\x9e\x63\xfe\x0a\x78\x9d\xda\x9d\x0d\xa7\x7f\xbb\x86\xb5\x26\xbe\x99\x28\x55\xa6\x4a\xdf\xef\x38\x14\x2c\x7f\x8d\xb5\x50\x6d\x9f\xab\x76\x1e\xd8\x59\xe6\xb4\x71\x6a\x5f\x98\xa1\x0e\x06\xe9\x69\x4b\x0e\xef\x05\x4e\xf8\x1b\xb8\x0f\x2e\xf1\xa8\x95\xd5\x3a\xe0\x8e\xdc\x6c\xc3\xe9\x36\xa6\xed\x82\x5b\xb2\xbd\x0d\xd7\xdb\x60\x2f\xd3\xbd\xa0\xb4\xb6\x5f\x81\x18\x5f\x08\xb8\x25\x83\x6d\x38\xd2\x98\x6c\x48\xba\xc1\xb5\xf9\xb3\x4d\xeb\xd1\xb0\x15\xe8\x6b\x4d\x38\x86\x6a\x0d\xc0\x71\x2a\xf0\x0f\xd7\x0f\xc1\xdd\x4d\x39\x57\x49\x55\xc5\x78\x95\x0b\x17\xc9\x6f\x7c\xa3\x15\xe1\x3f\x6d\x1d\x8d\x46\x63\x61\x94\xab\xf6\x64\x90\xbd\x1c\xa3\x97\x37\x0f\x1a\x99\xf8\xed\xfd\xc6\x54\xb7\x77\x1b\x53\xbf\xbd\x5b\x0f\x4d\xf1\x23\xad\xa3\x89\x03\x4e\x77\x7a\xdb\x88\xa3\x71\x38\x68\xa8\x91\xcf\x49\x07\x1a\xf6\x3f\xb7\xbb\xb3\x4f\x8b\x61\x3a\x2b\xc9\xdc\x9a\x5b\xb2\xec\x62\x49\x49\xf1\x15\x97\x83\x2c\x44\xa2\x62\xc1\x8c\x85\xd2\x13\x2e\xf9\xa8\x18\xc0\xab\x7a\x69\xc9\x6f\x0a\x6d\xec\xe5\x36\x91\x14\x5e\x6c\xd3\x55\x3a\x75\x91\x6c\x6f\xc7\xab\x8e\x62\xaa\x49\xd6\xfa\x7a\x69\x75\x09\xe5\x38\x94\x25\x8f\xee\x72\x8b\xd6\xec\x46\xd6\x82\x3f\xa4\x98\x55\x84\x8a\x98\x35\xca\x01\x26\x46\x8d\xb4\xda\x64\x45\xa1\xfc
\x52\x73\xd7\xd5\xf7\xa2\x9f\xd4\xbe\xd7\x4f\xbd\xfd\xa8\x7d\xaf\x87\x23\x7e\x2a\x73\xd3\x1f\x92\x0c\x55\xd5\x83\xf3\x36\xa8\xe4\x78\xaa\x4c\xd6\xf4\xe9\xcb\x36\x60\x3c\x62\xf0\x9a\xb6\x9c\x71\xe8\x3f\xf0\xa3\x48\xc7\x5a\xf1\x69\x7b\xcf\xed\xb8\x9d\x36\x1f\x4f\xaf\xb8\x7b\xd0\x1e\x84\xb1\x7e\x10\xc4\x71\x01\xe0\x4e\x42\xe9\x06\xc6\xae\xf9\x12\x98\xc1\xfc\xb2\x0d\xd2\xe2\xc0\x05\x90\xcf\x44\x1c\x4d\x44\x7b\xcf\x7d\xe8\x76\xb0\x64\xf9\x75\x51\xf8\x47\xad\xb0\x18\x4f\xda\x03\xae\xc5\x34\x0c\xae\x85\xc2\x82\xd5\x57\xb6\xd8\x93\xa0\x6e\x6a\x58\xab\xe2\x0d\x51\x6e\x80\x07\x44\xef\xcc\x1f\x49\x7b\x79\xea\xe1\x7b\x95\x3f\xe9\xa5\x74\xc4\xbc\x24\x7d\xf3\x97\xfe\xaa\x97\x41\xe9\xe5\x52\xdf\x7e\x0e\x4c\xfd\x67\x75\xc5\x24\x9d\x52\x66\xaa\xe6\xb2\xf1\x7d\x80\x5e\x99\x42\x44\xed\x78\x78\xd7\x43\x43\xe5\x6c\x6d\x7f\xeb\x45\xe5\xb3\xcc\x3f\x87\x43\xb2\x83\xbe\x39\x43\x52\x56\xb6\x56\xa6\xc8\x92\xb3\x6d\xd3\xdc\x1b\x49\xbf\x84\xc0\xcf\x11\xe0\x39\xe2\xf5\x58\x2b\xd3\x6f\x51\x47\x1e\x94\x72\xd9\xe4\x2f\x7d\xbc\x4f\x26\xd7\x6e\x6c\x1e\xff\xdf\x03\xf8\x23\x00\x19\x81\x8a\x40\x47\x20\x22\x48\x22\xe0\x11\x04\x11\xfb\x2a\x89\x73\xca\xe3\x6b\x87\x42\x14\xad\xcb\xd7\x15\x44\x24\x4f\xd9\x95\x66\xf6\xaa\x5f\x50\x85\x6c\xfd\x19\x97\xac\xe0\x18\x2a\xd7\xe6\xe1\xdd\x31\xe4\x83\xc2\x2b\xf0\xf2\x63\x61\x8a\xdc\xf3\xdb\xe2\x96\x19\x51\xba\x60\x8d\x33\x01\xaf\xd5\xaa\xdb\xac\xa2\x69\xac\xb9\x16\x0e\x68\x0a\xff\xf1\x5a\xb9\x92\xdf\x84\x23\xae\x23\xe5\x26\xb1\x50\x47\x23\x21\x75\x71\xb3\xd2\xa9\x0a\x07\xe8\xf3\x6b\x36\x57\x62\xbb\xe2\xf1\x55\x16\x95\xa5\xe9\xea\xb3\x6d\x3d\xe5\x06\x5a\x8d\xdf\x8a\xbb\xf9\x5c\xb9\x13\xa1\x79\xfa\x18\x5f\x85\x43\x8d\xcf\xdd\x43\xb3\x3e\x27\x5a\x47\x72\x3e\x97\xae\xe6\x6a\x24\x34\x9e\x07\x8f\x66\x72\x1c\xf1\xc1\x7c\x4e\x94\x3b\x55\x78\xf7\xf3\x33\xcb\x0b\x84\xa2\x72\x72\xa5\xc4\x10\x14\x33\x5d\x03\x92\x3d\x17\x44\xe3\x79\x22\x92\x10\xd9\x6c\x2a\xd7\xbf\xb3\xec\x72\x87\x3f\x02\xfb\x23\xc0\x1f\x89\xcb\xed\xcf\xc4\xe5\xfd\x7c\xd7\xc0\x4b\xb7\x29\xf4\xc2\x9e\x8b\x81\xe0\x99
\xb7\x32\xfc\x0f\x6f\x72\x95\x80\x15\x6b\xba\x80\x81\xef\xd9\x9b\x5a\x1f\x7b\x78\xbf\x6b\xb0\x67\xfe\xdd\x33\x28\xc2\x28\xcd\xac\x3f\xc4\x87\xdd\x05\xf8\x11\x1b\x48\x88\x23\x16\x48\x18\xe3\xcb\xce\x02\x6e\xf0\xa1\xbd\xb3\x80\xed\x88\xdd\x44\x30\x89\xd8\x76\x04\x57\xeb\x58\xea\x9e\x3f\xf7\x26\x11\xf0\x17\x18\x75\xff\xda\x1b\x47\xc0\xdf\x98\xbf\xc1\x0f\x4f\x01\xdf\xf6\x3e\x62\xb6\x39\x9f\x7b\x69\x8e\x37\x7e\xe9\x39\x0e\x04\x37\xde\x19\xf0\x5d\x3c\xff\xfd\xca\x93\x10\xbc\x31\x58\xfc\xb1\x77\x06\xfe\x04\x93\xad\x3d\x47\x55\x05\x3f\xfa\x47\xde\x7d\x5a\x0c\x7f\x22\xd4\x13\xf3\xe7\x39\x66\x8d\x7b\x83\x05\xde\x61\x01\xff\xab\xd7\xc1\xeb\x0f\xb2\xf6\x8c\xd6\x53\xfe\x0c\x69\x46\x6a\x73\x42\x79\xc7\x9b\x46\x10\xdc\x1a\xe2\xf7\xbd\xaf\x98\xd2\xd5\xd6\xfa\xc1\x1b\x47\x78\x0d\x42\xc4\xee\xfd\x53\xf3\x03\xee\xa2\x5f\x48\x0c\xdb\x81\x76\x9a\xe0\xf5\x36\x62\x89\x84\xeb\x88\xdd\xf3\x2b\xec\x0a\x8e\x84\x63\xf2\xb9\x63\xf3\xe7\xc4\xfc\x39\x35\x7f\xbe\x98\x3f\xaf\x30\x1f\xdd\x11\xb6\xe4\x60\x01\x27\xf8\xb0\xb3\x80\xd3\x6c\x00\x9f\x47\xeb\xaf\x6e\x38\x28\x5f\xdd\xf0\x2d\x1b\xfb\xef\xf8\xf0\x68\x01\x4f\x33\xac\x97\xd1\x86\x3b\x1a\x89\x24\x1a\x05\xc5\x9b\x88\xad\x4c\x80\xb8\x7c\x33\xab\xa4\xf7\x5a\xdd\xdd\xbf\x56\xf9\x45\xb4\x4c\x15\xf7\x3f\x95\xef\xa7\x55\xc2\x4c\x2a\x82\x87\xa3\x28\x5d\xc0\xbb\x88\x3d\x93\xf0\x21\x62\x3f\x25\xbc\x8a\xd8\x87\xc8\x8c\xc7\x59\xc4\x5e\x29\x78\xbd\x9e\xc8\x7b\xfe\xc6\xd3\xe0\x87\xa6\xb1\x3f\x6d\x6b\x3f\xae\x1d\x16\xe5\xfa\x7d\x4c\x85\x18\xd9\xf4\x8c\x98\xb1\xe8\x78\x3d\xb8\xcd\xa2\x28\xf3\x0c\x8a\x1f\x23\x38\xb3\x63\xf9\x4c\xb2\x68\x65\x0a\x62\x08\x20\x82\xb0\xa0\xee\xad\xa7\x81\x9f\x79\x11\xf0\xae\x97\x00\x7f\xe8\xa9\x94\xd8\x27\x9e\x00\xff\xb3\xc7\xc1\x3f\xf1\x42\xf0\xcf\x3c\xcc\x82\xfe\x4a\x6d\xca\x82\xee\x0b\xd3\x56\x69\x4a\x7e\x34\x88\x8e\x8d\x62\x43\xe1\x6b\xb4\x3a\x27\xf3\x43\xf0\x31\x27\xf3\xcf\x88\xf9\x82\x84\x14\xc2\x4d\xd9\x24\x7f\x46\x30\x23\xb1\xcd\x06\x67\xf3\x74\x3e\x8b\xd8\x50\xc2\xcb\x68\xf3\x95\x24\x43\xc9\x66\xe4\x59\xb4\xe1\x24\xb9\x93\xc8
\xa9\x8a\x02\x11\xc7\x62\xe0\x64\x6b\x6b\x2c\x48\xea\xd8\xcd\x76\x22\x4a\x5f\x52\xa3\xcb\x89\x93\xe9\x54\x2d\x95\xdb\x59\x52\x61\x5f\x46\xc4\xf9\x22\xaf\x65\x34\x93\x0d\x7d\x37\x15\x5e\xc3\x69\x49\xba\x30\xb3\x07\xfb\xf4\x8e\x84\x60\x73\xbd\x3c\xb9\x73\xe0\x6b\x44\xcc\x7b\x7c\x99\xa7\x88\xa9\x7f\xc8\x92\xcb\x2c\xbd\xb7\x0b\xd7\x50\x42\x2c\xc8\x2b\x85\xee\x62\x78\x11\xd9\x2a\x6c\xca\x83\xa3\xc8\x7c\x5b\xd1\x69\x7e\x84\x03\x46\xe1\x7d\xf4\x8b\x97\xae\x7c\xde\xc0\xf9\xb5\x9c\xf8\x06\xfc\x4b\x54\xbd\x8b\x52\xe6\x29\xa0\xd7\xa7\xa8\xe3\xe9\xe5\x80\xe9\x71\x1b\x5a\xce\x10\x98\x1e\xc6\x4a\x6f\xbb\xc5\x70\x70\x54\x10\x40\xb2\x84\x98\x0f\xa5\xb2\x68\x76\x64\x97\x3c\x94\x9c\xd4\xef\x23\xd0\xf3\xb9\xb0\x11\xea\x95\x6f\x98\x85\x2b\xfb\x66\x14\x21\x8a\xd7\xc2\x45\xec\x4b\x04\x3f\x7e\xb5\x87\x3e\x45\x9b\x66\x8c\x4d\xef\x15\x9a\x19\x73\x63\x13\x7b\xe1\x8c\x79\x9b\x2d\x0f\xbf\x47\xeb\x73\x9b\xfe\x88\xe0\x6d\x04\xb7\xe4\x53\x54\xca\x52\x86\x0a\x1b\xca\xc4\x3f\x22\x46\x7e\x0f\x50\xb0\x76\x56\xe6\x58\xb4\x89\x0e\x8d\xe9\x6b\x9a\xde\xcf\xf2\x25\x76\x8c\xe0\xa9\xe5\x24\xfb\xdd\xa2\x94\x21\xfb\xa9\x40\x85\x7f\x91\x6f\xde\x0e\x29\x66\x41\x33\x5f\x13\xa3\x9c\x62\x62\x45\x0e\xed\x6e\x9e\x88\x19\xeb\x3b\x25\x09\xe6\xec\x38\xec\xa4\xc9\xce\x04\x04\x5e\x82\xc9\xce\x12\x4c\x76\xa6\x4c\x9f\x68\xe0\x5e\xe2\xf2\x05\xed\x25\x8c\x08\x86\x88\x76\x68\x9f\xa4\x39\xe9\x5b\x5d\xd0\x4c\xb7\xba\xd0\xa5\x5e\xfa\x8e\xdb\x24\xf5\xad\x2e\x85\x04\x47\xed\xa7\x5a\xb5\x42\xac\xe8\x95\x1b\xa2\x42\xab\xe1\x96\x73\xbf\x99\xb9\xaa\xb1\xf5\x22\xfc\x6f\x66\x61\x1f\xdd\x32\x41\x85\x09\xed\x39\x87\x04\x6c\x2a\x77\x03\x82\xe4\x71\xcc\x37\x24\xb0\x81\x49\xb8\x3a\xdf\xb4\x08\x61\x1b\xaf\x47\x01\x1e\xb2\x27\x10\x84\x2c\x4c\x20\x5a\x09\xdc\xc1\x63\xbb\x36\x3e\xac\xd9\xdc\x7a\x70\xfe\x67\x7c\xeb\x47\x17\x0f\xec\x01\x2f\x89\x77\x4a\xb2\x96\xa4\x8c\x49\xcc\x1e\x66\xb3\x50\x87\x21\x5b\x95\x46\xf3\xf1\xa1\x4d\x65\xb9\x2a\x09\x26\xd7\x8d\x49\x14\xeb\xc6\xe3\x8d\x39\x30\xd3\x40\x80\x28\x24\x4e\xc7\x35\xf2\x72\x5d\x0a\xce\xe1\x38\xe2
\xba\x96\x80\x93\x87\xa4\x2b\x76\x7f\xb3\x97\x78\x96\x73\x69\xc2\x30\x64\xdc\xac\xc8\x7f\x91\xa3\xbf\x31\x0c\x89\x6c\x1d\x74\x7e\x53\xbf\x1d\x74\x7e\xeb\x8a\x5d\xf3\x4c\x74\x9b\x53\xfc\x61\x90\x8b\x16\xde\xb6\x17\x57\x96\x2d\x5e\x5c\x59\x16\x30\x09\x11\x53\xeb\xf9\x22\xf8\x7b\xa2\xcd\xa6\x1d\x34\x2b\xbb\x72\x83\x65\xb9\x96\xac\x92\x6b\x89\x95\x6b\x36\x2b\x28\xba\xfe\xea\xd2\x2d\x81\x19\xe1\x60\xe5\x9f\xbd\x90\x1d\xc6\xe1\xfa\xeb\x97\x62\xcb\x4f\x56\xcd\x18\x84\x6c\x1c\xc2\xcd\x66\xf0\x1f\x39\xf8\x76\xb8\x94\x58\x3b\x15\xc8\xeb\xfb\x48\xa4\x7d\xb4\xb6\x7f\xe4\xca\xd9\xa2\x51\xfc\xe3\xc4\xa5\x30\x09\xd9\x76\x08\x57\xe1\x2f\x5e\x39\x36\x0d\xd9\x57\x05\xa3\x90\xc5\x09\xcc\x42\xf6\x1e\xee\x42\x76\x02\x5f\xd5\x32\x9f\xe7\xcb\x89\xf5\xfa\x5a\x3f\xbe\xbd\xbf\x20\xbf\x6f\x63\x90\x10\x5e\xba\x65\x50\x15\x4d\xdb\xed\xee\xef\x1e\x88\x83\xdf\x88\x68\x77\x1f\x3f\xec\x18\x5b\x2d\x4d\xa5\x40\x92\xc3\xdd\xf9\x7c\xeb\x26\x21\x82\xf6\x79\xbb\xeb\x71\xda\x22\xdb\xe6\x57\x7b\x3b\x21\x08\x5c\x44\xfb\x04\x86\x51\x55\x4b\xd3\x45\xaa\xd6\x24\xf5\x54\x1a\xbb\xdd\x43\xde\x47\x3a\x3c\x95\x69\x35\xa5\x9b\x2c\x1e\x1f\xf2\xf9\x7c\xe7\x31\x63\x8c\x37\x9b\x69\xa5\x19\xf4\xce\xc1\xc3\x47\x7b\x62\xbf\xee\x6f\xad\x60\xdc\xef\x3c\x7e\x78\x90\xc3\x14\x09\x39\x3a\x25\x98\x87\x0f\x1f\x1e\x88\x83\xba\x43\xbd\x82\xa6\xdb\xd9\x3d\x78\x94\xc3\x1c\xac\x44\xd3\xdd\xed\xec\x1d\x14\xf4\x3c\x5c\x8d\x68\xff\x60\xb7\x44\xf4\xa3\xd5\x40\x8f\x76\xbb\x07\x8f\x72\xa0\xc7\x2b\xab\xdb\xe9\x3c\x7e\xbc\xbf\x93\x03\x15\xb9\x40\x2a\xa8\x76\x76\xf7\x1f\x3d\x2c\x41\x75\x57\xe3\x3a\xd8\x39\xd8\x2f\xba\xa9\xbb\xb3\x1a\xd7\xa3\x47\xfb\xb6\x33\x6b\x2a\x64\x59\xe0\x61\xb4\x31\x0a\xbc\x6f\x9a\x24\x36\x9d\xe2\x62\x01\x33\x72\x13\x96\xfe\x04\x21\x19\x90\x37\x59\x92\xc9\x61\x42\xf6\x28\x8c\x13\xe2\xb4\x1d\x5a\x7a\xb9\x53\x7e\x89\xbf\x29\x85\xdb\x0d\x53\x65\xa7\x3c\x55\xae\xc3\x5f\xbf\x89\x2e\xd3\x29\x54\x7a\x05\x31\xe6\xfb\x33\x73\x86\x74\x40\xbb\x3c\xd3\xd5\xb6\x4a\x57\x66\x08\x86\xf7\x68\x11\xc1\x38\x75\xf3\xab\x40\x44\x7a\x19\xcd\x8c
\xdc\x9a\xa5\x5a\x98\x89\xae\xed\x1d\x34\x47\x21\x9b\x24\x70\xb2\xc1\x90\x90\xbf\x11\x23\xd4\x5b\x68\x41\x9c\xae\x58\xbb\x6a\x99\x40\x2b\x5d\x5a\xfe\x63\x13\x77\xfa\x36\x89\x90\xe9\xc2\x53\x67\xb9\x5b\xbd\x55\x7d\xed\x95\xfa\x1a\x2a\xf5\x58\x9c\x5c\x5b\x38\xd7\x20\x0c\x43\x6a\xde\x75\xec\x4e\x67\x06\x3d\x09\xd7\xa4\xd6\x37\xe5\xbe\x59\x42\xea\xcc\x70\x12\xfe\x55\xf9\xae\x2d\xdf\xb2\xe5\x57\xc2\xb4\x53\x18\xc3\x2f\x17\x7f\xd1\xb6\xe2\x7b\x10\x92\x21\xf1\x8d\x3a\xd8\xc9\xfe\x4f\x8d\x86\x6a\xcf\x69\x7f\x55\x14\x9e\x87\x1b\x4c\x68\xa3\x24\x57\xf4\xe3\x6f\xe1\xdf\xba\x8b\x2e\xbf\x0a\xb1\x74\xd5\x1b\x86\x57\xd9\x93\xe5\xc8\x4a\x91\xc6\x4b\x3d\xca\x57\xba\x65\x33\xb0\x0e\xfa\x2d\xac\x83\x1a\x9a\xbe\xaf\x98\x33\x4c\x12\x43\x7d\x17\x02\xef\x0c\x06\x5e\x17\x7c\xaf\x63\x1a\x81\xc9\x1a\x16\x15\x85\x66\xa2\x09\x62\x36\x4b\xfc\x19\xc5\x8c\x31\x66\x05\x5b\x50\x78\xba\x0a\xf1\x8c\x7c\x47\xed\x7a\x19\xc7\x1b\x01\x57\x09\xa0\xe2\x63\x91\x70\x83\xe4\xfd\xb2\x11\x6c\x17\x21\xcc\x98\x53\x46\x62\x6c\xd5\x22\xab\xb6\x4e\x6d\x55\x2f\x16\x16\x15\x9a\xac\x97\xeb\x4c\x80\x5c\x4a\xb4\xbb\xf5\xfb\xd0\x40\xd8\xab\x8d\xde\x84\xeb\xaf\xe6\xc1\x54\x97\x89\xbb\x3d\x9f\x1b\xa3\x21\x7f\x23\xcc\x1b\xe1\xf2\x34\x45\x66\x9a\x4d\xd4\xe6\x12\x2d\xe7\x96\x2c\x90\xa5\xf7\xf8\x08\xcc\x24\x2a\x96\x52\x50\x0a\x54\xa1\xf3\x14\x94\x59\x8e\x4f\xee\x0e\x80\x97\x92\x4f\x1a\x6c\x01\x24\x69\xee\xcb\x88\x19\x5b\x25\x64\x09\x26\x29\x4d\xf0\x02\xd8\xc4\x15\xbd\x3a\x95\x35\x92\x22\x08\xf3\x1c\x9b\x48\x1d\xa7\x19\x81\x41\x8d\x2c\xf3\x3d\xcb\x1e\x9a\x52\x98\xb7\x20\x27\xca\xe0\xe3\x48\x14\x85\x77\x21\xfb\x0e\x1f\xc2\xd5\xf7\xef\xd4\xaf\xae\x4d\x2b\x31\x00\x37\x11\xdc\x44\x34\xbf\x19\x94\xe7\xa2\x58\x63\xe6\x4e\x8d\x99\x3b\xb5\x2b\x7a\x79\x92\xae\x77\xa6\x29\x09\xad\x4f\xa9\x1b\xf2\x26\x4c\x1d\x4b\x77\xe4\x83\x1d\x81\x80\x42\x54\x0f\x89\xc3\xba\xd3\x8c\xa8\xe6\x6b\x6d\x71\x2b\xa1\x09\x0a\x44\x11\x45\xed\xed\xd5\xaa\xf6\x69\x96\x83\xd9\x8b\x84\x70\x08\xb2\xe3\xc0\xc5\xd5\xa4\xd5\xbc\xa7\x59\xea\x51\xd3\x40\xed\xe2\xed\x45\x67
\xe1\x1a\xf7\xd1\x4e\xea\x9d\x3a\x59\x6d\x57\x0d\x05\x8c\x12\x38\x0b\xed\x25\x2d\xaf\xd7\x60\x79\x0c\x01\x22\xf9\xb8\xae\x96\x7d\x18\x21\xc0\x57\xb5\xaa\x9a\x8f\x28\xae\x5f\x87\xe4\x48\x53\x7b\x08\xf8\xda\x68\xcd\x28\x35\x8f\x43\x63\xdb\xef\x2f\xe0\x6b\xa5\x7f\x2a\xfe\x85\xc6\xd2\xa4\x97\x6c\x46\xfc\x08\xbd\x8b\xb5\xcc\xc8\xb1\x20\x22\x4d\xb5\x01\x29\x94\xad\x5e\xc3\xeb\x90\x08\xdc\x98\x93\xa5\x60\x7d\x94\x14\x5f\x6d\x12\x69\x14\x0f\x98\x4a\x08\x8e\x43\xd3\xab\xdf\x37\x38\x3d\xac\x2f\xef\x8e\x7c\x0d\x33\x87\xde\x71\x88\xf7\x4d\xa5\xf7\x31\xff\x0c\x59\xee\xc9\x4a\x62\x07\x5e\x29\xeb\xd9\x12\x72\x10\x1f\x69\x07\xde\xdb\x9f\xc9\xd4\x48\xa7\x41\xe9\x4d\xac\xb9\xd2\x65\x90\x61\x28\x47\x42\x4d\x55\x28\x35\x7a\xbd\xf0\x65\x96\x15\x39\x46\xbf\xd9\x8b\xcc\x6f\xc6\xa5\x8c\x34\x7a\x7e\x63\x07\x4e\xd0\x9f\x76\x4b\xbe\x83\x33\x12\x52\x28\xae\x23\xf5\xe5\xf8\x9d\x03\x5f\x15\x7e\x39\xd2\xb6\x10\x66\x6a\xc8\xe1\x63\x41\x9e\xe5\x19\x15\x29\x85\x67\x69\x43\x30\xb1\x8b\xad\xee\x67\x48\xab\x54\x38\xf0\x22\x5a\x83\xeb\x35\x5e\x85\x04\x2f\x57\xae\x8c\x4c\x92\xaa\xec\xd7\x9e\xbd\x75\xd7\xd8\x3e\xa6\x17\x5f\x6c\x30\xca\x2e\x05\xbc\x0c\x31\x27\x8a\xb5\xcb\xde\xaf\xbf\x2a\xb2\xc8\x26\x4e\xac\x2d\x4d\xe1\xf3\x4a\xe7\x81\x90\x41\x34\x10\x5f\x8e\x5f\x3f\x8d\x26\xd3\x48\x0a\x4c\xd4\xbf\x80\x2f\x06\xf5\x25\x85\x27\x1b\xd6\x77\xdc\xc0\xf8\x68\xb3\x02\xa0\x9b\xf0\x47\x98\xc6\xe6\xb2\xff\x70\x60\x07\xd3\x35\x6e\xfd\x87\x03\xbb\xf8\xc4\x1c\xe8\xd8\x57\xcc\xc1\x33\x51\xf0\x29\x64\x9f\xe0\x6d\xb8\x92\xe5\xca\x3e\xa9\x84\x69\xa2\x28\xc9\xae\x40\xdb\xa6\xf7\xbc\x7c\x05\x1a\xea\xa7\x72\x3e\xe7\x90\x98\x85\xd7\xae\x20\x89\xcb\x21\x40\xc1\x6f\x84\x3d\xda\x76\x41\x31\x67\xd0\xdc\xc4\x22\x81\x11\xa2\x38\x7d\xcc\x2f\xc5\x02\x37\x75\x41\x59\xbd\xf4\xf7\xb0\x7a\x01\x94\xde\xe4\x82\xba\x25\x6f\x43\x7b\x73\x89\x4e\x0d\xd8\x3f\xc2\xf5\x6e\xc0\xdf\x8d\x14\x5c\x29\xa2\x26\x21\x7c\x4b\xc0\x86\x0f\xa0\xfb\x6e\x88\xde\xc0\xee\x02\xd4\x90\xad\xb9\x6b\x7f\x59\x41\x40\x3f\x9e\x4e\x1d\x62\x9c\x16\x17\x1e\xa8\x55\xd7\x80\x08
\x4a\x3d\xfc\xb4\x93\x79\x14\x33\xcf\x5e\xb7\x76\x8d\x81\x74\xfd\x56\x17\x35\x3a\xd7\x7f\xd9\xea\x66\xf7\x19\x78\xd5\x52\xd2\xe5\x27\xad\x7a\x51\x95\x15\x2b\x6e\x13\xa1\xa0\xb1\x75\x0f\x17\x20\x86\xeb\x95\xc3\x7c\x5d\x54\x95\x7b\xf9\x32\x1f\x4b\x80\x3e\x96\x01\x6e\x2f\xd7\x17\x3f\xcc\xf1\x95\x2d\x7f\xd6\x00\x11\x2b\xae\x11\x2e\x6e\xee\xd3\xc5\x32\x67\xc1\x93\x8a\xc6\xb8\xa0\x90\x0c\xd7\x0c\xeb\xd6\x8c\x88\x21\xaa\x77\xee\xf6\x82\x02\x1f\xae\x1f\xfe\x64\x98\x79\x57\x82\x21\x4b\xd6\xec\xf9\x54\xe7\x41\x54\xf2\xcd\xa6\x9e\xd9\xa8\xe4\x99\xcd\xdc\xb0\xa6\xbf\x79\xc5\x05\xbb\xe8\x05\x8c\x70\x46\x52\x2f\x6c\xf4\x57\x5e\xd8\xa8\xec\x85\x35\x7a\x8f\xf5\xa0\x47\x43\xb6\x6c\x9d\x3c\x09\x29\x10\x15\xb1\x7b\xfe\xcd\xd3\x43\x08\x9e\x7b\x44\x46\xec\x3e\x78\xee\x5d\x27\x10\x7c\xc5\xfd\xd7\x4f\xde\x75\xb2\xa0\x6e\xf0\xdc\xbc\x90\x91\x1b\x7c\x35\xef\x64\xe4\xfa\x9f\x16\xb0\x86\x73\x95\xf9\x5a\x70\x2f\xb6\x56\x95\xfd\xd0\xf5\xab\x86\x22\x97\x7f\xa3\x99\xdb\x2d\x63\x65\x9f\x04\x43\xf3\x29\x78\x0e\x96\x6f\x53\xa6\xed\xfe\xbf\xdc\xbd\x89\x76\xdb\x38\xd6\x30\xf8\x2a\x0a\x3f\x4d\x1a\x28\x5f\x29\x92\xed\x24\x15\xa6\xf8\xeb\x38\xab\xe3\xd8\x59\x6c\xc7\x71\x5c\x9d\xcf\x07\x24\x41\x9b\x36\x45\x2a\x20\x28\x59\x4e\xf4\x2e\xf3\x2c\xf3\x64\x73\x70\x01\x2e\xa2\x48\xd9\x55\xdd\x5f\xcf\x77\xa6\x4f\x57\x4c\x11\x0b\xb1\x5c\xdc\x0d\x77\x31\x71\xa7\x7d\x54\xbb\xdb\x95\x6a\xa2\x54\x1f\x6f\x2c\xd7\x02\x35\x32\xa3\xc7\x95\xb5\x53\x35\x23\x2c\x00\x81\x5d\x9c\xac\x9c\x2d\x33\xb4\xe2\x84\xe8\xa4\x87\x28\xd4\xb5\x06\x4c\xa9\x60\x37\x2d\x87\xa1\x9a\x6f\x07\xf3\x01\x2c\x74\x3a\x06\x20\xdf\x3c\xe7\x1f\xd6\x3f\xb4\xc8\x87\x66\x79\x13\x4e\x86\x14\xe6\x09\x19\x52\x0a\x99\xea\xbe\x92\xf6\x62\x90\x67\xc4\xf8\xe6\x69\x01\xef\x5b\x08\xdf\x3c\xf8\x90\x19\xb2\x17\xb4\x42\xab\x58\x50\x70\x83\xb5\xca\xc2\xa0\x00\xe7\x34\x58\x2b\x63\x74\x66\xc4\x55\x4b\x35\xce\xb4\x44\xeb\x06\x98\x67\x10\xdf\xcd\x48\xb7\x51\xbe\x7d\x9d\x11\x24\x92\xc8\xc9\xa2\xa1\x21\x6d\xad\x7b\x9c\x91\x77\x92\x64\x3a\x03\xdf\x77\x9d\x8d\x2e\x0a\xd6\xb1\x35
\xf9\x48\xee\xfa\x36\x57\x7d\xce\x48\x14\xe2\x48\xa3\x10\xc3\xbf\x47\x88\x63\xe9\xdd\x23\xc2\xd6\x21\x26\x26\xf9\x8e\x83\xf2\xdb\xd6\x49\x31\x71\xf7\x98\xdb\xf3\xbb\xd7\xf3\xbe\x33\x33\xab\x0a\x31\xda\xd6\xea\x3f\x0b\x0a\xd3\xa5\x11\xae\x6a\xda\xf3\xee\xbf\x34\x5e\xd2\xb2\xba\xcc\x81\x24\xef\x90\xfc\x19\x7f\x57\x2c\x5a\x1a\x14\x33\xae\x0b\x1d\xcb\x15\xfd\x6a\xc5\x15\x13\xc3\x28\xd4\x93\x95\xfa\x4f\x56\xfe\x2a\xfb\x98\x93\x28\xc8\x7b\xa0\x54\x2b\xcb\x81\xb7\x6f\xd8\xe9\x02\x84\xd9\xa4\x6e\xe0\xdc\x8a\xa6\xdc\x2e\x1d\x93\xc7\xc8\xfa\x67\x5c\x3c\x09\x93\x39\x06\x6e\x85\x93\xa3\x47\x26\x8d\x2a\x85\x48\x85\x15\x13\x7b\x2f\x23\xf9\xa3\xb5\xb0\xc0\x7b\x6b\x27\x01\x78\xdf\x6c\x0b\x2c\xf0\x36\xed\x6e\xa0\x50\xa3\xf5\xd3\x02\xef\xa9\x3d\x50\x48\x33\xa1\xaa\x92\x4c\xfa\xde\x5b\x55\x6f\x2f\x23\xea\xf9\x1b\x55\xb5\xd5\xd3\xa6\x6a\xa0\xdf\xba\x9f\xa8\x6a\x76\x9f\xd8\xeb\xab\x69\x92\x6b\x4b\xbb\xb9\x58\xe0\x87\x9e\x22\xd0\xbb\x01\xa4\xc5\x27\xf4\x12\xe3\xa7\xa7\x64\x9a\x97\xa8\x81\xea\x97\x7a\xac\x69\x31\x50\xdd\x0d\xd5\x9a\x23\x0a\x63\x45\x46\xce\x13\x78\x19\x92\x5b\x41\x61\x37\xa3\x70\x19\xfc\x9d\xcc\x9d\x8a\x53\x1d\x19\x10\x16\x14\x13\x99\x9b\xec\x9d\x93\x60\x1d\x63\x1c\x63\x2c\x4c\x0a\x17\x81\xb9\xad\x9d\x05\x4e\x14\xc3\x3c\x68\x57\x9e\x6e\x57\x95\xa7\x37\x41\xb3\x50\x68\x82\xd6\xc0\x75\xb0\x3e\x97\xf5\x4e\x60\x2c\x6e\x8e\x02\x63\x3f\x73\x1c\xb4\x26\xc9\x7e\xdd\x54\xd4\x31\x17\x48\x0b\x38\x5b\x3b\xd3\xb1\xd6\x23\x61\x18\xe3\x42\x0e\xb8\x5a\xc7\x6f\xe4\x12\x39\x53\x12\x39\xeb\xb3\x82\x07\xeb\xa9\x4f\x7a\xfd\xee\xc3\x87\xfa\x81\x19\xa6\x4c\xff\xf2\x15\x57\x1e\x06\x44\x35\xf5\x50\xbf\x93\x1b\x30\x9a\xd1\xdc\x2a\xca\xb5\x68\xfa\x21\x16\x55\x59\x5f\x2e\x29\x75\xaa\x99\x61\xfa\x1c\xf4\x6d\xf6\xcb\xe5\x15\x69\x49\xcb\x52\x68\x4f\x92\xd2\x31\x59\xfb\x66\xa2\xc1\xbf\x87\x37\x3f\xbe\x51\x08\x79\x98\xee\xc4\x8c\x3f\xab\xa9\x82\xf4\xfa\xbc\x0c\x48\xa6\x18\xa1\xe7\xab\x2b\xa4\x79\x33\x4c\xb4\x53\x94\x26\x4b\x23\x88\x9d\xa4\xaa\xcb\xda\x0b\x21\xe9\xb3\x22\x09\xcd\xcb\x80\x24\x7d\x1f\x2f\xbd
\xda\xbf\xbb\x80\xf3\xa0\x29\x49\x72\x9d\x2b\x36\xf3\xad\xde\x3c\xa2\xd1\x25\x30\xe4\x90\x3d\xe4\x90\xc3\x80\x1c\x13\x45\xd1\xfe\x18\xd4\xb6\x5c\x3f\x2c\x2f\x80\x06\x8f\x19\x39\x0f\x30\xcd\x99\x5a\x04\x3d\x65\xd6\xf7\xab\x13\xfe\xf5\x2b\xc9\x97\x23\x54\xcb\x21\xca\xe5\x08\x57\x06\x18\x2c\xe7\xec\xd9\x0b\x21\xec\x33\x08\xfb\x2e\x84\x7d\x2f\xff\x58\xa8\x96\x25\xa0\x8b\xf5\xa3\x29\x8d\x5c\xf7\xd4\x3b\x97\x5c\x05\xb0\x0c\xd6\x68\x6b\xd6\xbc\x7c\x6a\x74\x7f\x63\xf9\x2e\x70\xf9\x94\x40\xdf\x90\xca\xf9\x2e\x98\x8c\x51\x5d\xe3\x2f\x16\xc4\xcb\xc9\x78\x3e\x8e\xd1\x34\xb1\x71\x35\x64\x91\xd9\x87\x29\x18\xf0\xaa\xb9\xbc\xf7\xc2\x62\x6e\x66\x1d\xf4\x14\xf7\x83\x46\x8d\xf3\xb9\xe6\xcd\x9e\x57\xa6\x8b\xe1\x0b\x68\xa1\x5f\x5f\xd6\xb6\xd5\x13\x34\x2d\x28\x7c\x08\x1a\x95\x19\x82\x18\x81\x47\xd2\x5a\x28\xca\x7d\xfd\xd6\xd6\x99\x0a\x4b\xf5\xc6\x6e\x2b\x6e\x74\x6d\x8c\xd4\x6a\x19\x24\x79\x1a\x38\x4c\xc2\xbb\x3b\x98\xc9\xd3\x80\xfc\xf4\x98\x12\x7b\x84\x4d\x98\x93\xc1\x6e\x40\xea\x6a\xb3\x59\x00\xe8\x54\x52\x72\xeb\x66\x6b\x46\x33\x93\x4a\x6d\x2c\x0b\x85\x98\xe2\xcb\xbd\x5b\x5b\x80\x77\x60\xc7\xe0\x3d\xb6\x77\x24\xf8\xcc\x96\xe0\xfb\xf6\x83\xe1\xc2\x28\xa8\x17\x14\x3e\xb6\x22\xde\x29\x79\x17\x80\xf5\xf6\xf5\xb1\xa5\xc8\x11\x5c\x04\x9a\x2f\x3e\x34\x64\x6f\x16\x10\x54\x9f\x59\x5c\x88\x44\x58\xb0\xa3\x68\xa2\x22\x82\x27\xc1\x9a\xdb\x2f\x22\x8c\xc9\xe4\x6d\xe0\x9c\xc5\xf0\x2a\x58\x6b\x8b\x7d\x1b\xc0\x8c\x9c\x98\x7f\xae\x38\xc4\x14\xc6\xc8\x88\xec\xf3\x95\x82\x99\x2c\xb2\x36\xbf\x5d\x23\xaf\xbe\x0a\x90\xdf\xfd\x2c\x40\xc0\x0e\x0e\x85\x34\xd1\x25\xb3\xb2\x5e\xa6\x57\x36\xce\x1a\x5d\x9d\x8b\x3b\x9e\xc2\xaf\xf9\x38\x1c\xf3\x24\x93\x1d\x7e\xe3\x71\xee\x73\xbf\x1e\x3f\xe0\x03\x97\xb3\x44\x5c\x77\xf4\xa2\x3d\xaf\x78\x66\xd5\xa4\xa5\x49\x00\x67\x78\x05\x73\xd4\xf7\x2e\xe9\x86\xd5\xb1\x36\xf4\x8f\x7d\x38\x0c\x88\x50\x60\x50\xbb\xf0\xb6\xbe\xc4\x3c\xb7\x70\x11\x3c\x9d\x24\x71\xca\x3b\x81\x48\xc6\x1d\x36\x09\xf1\x16\xa5\xcf\xea\xde\xd6\x07\x2c\x0a\x12\x31\xe6\x7e\x27\x13\x91\xa9\x83\x7e\x5d\x9a\xc4\xbe
\x69\x3e\x88\x65\x74\x96\xb8\xd4\x2e\xea\xd4\x6e\xa9\xb5\x71\x98\x69\x17\x84\x7c\x2e\x6f\x15\xa0\x7c\x54\xdc\xef\x49\x42\x5e\x85\x7a\x8b\x0e\xee\xdf\xf3\xfa\x0e\x6f\x4d\x87\x9f\x0d\x58\x36\x6c\x66\x5e\x3f\xde\xb0\x1e\xad\x68\x5f\xb5\x69\x2d\x59\xc7\xe8\x50\x0a\x5f\xd6\x48\x8d\xbb\xfa\x5e\x11\x0f\xc7\x8b\xbf\xc5\x00\xce\xc9\x87\x40\xa7\x6e\xc7\x29\x1c\x84\x24\xc1\xe0\x39\x46\x18\x0e\x88\x16\x00\x28\x5c\x4b\x4a\xab\x79\x52\x29\xfc\x58\x1d\x58\xf5\xfa\x5b\x14\xd7\xdf\x0f\x86\xa5\x85\x17\x6a\xde\x35\x1f\x95\x27\x83\x34\x57\xda\xa8\xfe\xfc\x14\xb4\x67\x8b\xff\x11\x34\x9b\xf2\xa1\xd0\xac\x87\xf4\x3e\x70\xe6\x31\x7c\x0d\x9c\xcf\x31\xb1\x26\x5c\xa4\x61\x2a\xdf\x2a\xd8\x78\x7d\x33\x61\xb1\xbf\x13\x45\x16\xbc\x0f\x28\x7c\x5b\x73\x4a\xf7\x0b\x31\x3c\x76\xdb\x6a\xed\x13\x01\x3f\xbd\xa9\x5d\x63\xa2\x6a\x81\x65\x0e\xc9\x9f\x1a\x90\xd0\xf6\xf4\xfb\x4a\x30\xef\x3c\x4b\xe4\x98\x03\xe3\x98\x6e\x73\x46\x5e\x4a\x50\x62\x4c\x4c\x29\x1e\x05\xa6\xd5\x97\xc2\xbd\x43\x4b\x1d\xe7\x97\x9d\x72\xfd\x98\xdd\x6b\xbb\x08\xfc\x2f\x5c\x18\x07\x10\xf7\x77\x8d\x8e\x94\xaf\x36\x2d\xb6\xed\x16\x09\x39\xbb\x44\x4a\x7e\x84\x94\xfc\x18\x29\xf9\x21\x24\x8e\xe8\xef\x42\xa8\x8a\x75\xa2\x96\x7b\xa5\x8a\x5c\x9f\x26\x52\xf4\xbf\xdc\x99\x28\x32\x5b\x9b\x28\x92\xad\x4f\x14\xe9\xe5\xc5\x26\xd9\x25\xcc\x33\x92\x34\xa4\x8e\x94\x4b\xa9\x23\x43\xfc\xe1\x65\xa9\x4c\xc6\x08\x56\x18\xe1\x82\x8f\xae\x25\xd1\xe9\x81\x31\x11\xc2\xf7\x5a\x76\x49\x4e\x47\xbb\x24\x86\xa5\x0c\x93\x9c\x6a\x0b\xf7\xcc\x55\xe2\xcb\xf6\x02\x98\xeb\x1c\x0a\x62\x05\x89\x97\xa5\x16\x05\x6f\x75\x2f\xda\x2f\xf7\xf7\x88\xde\xdb\x2b\x3b\xee\xb3\x91\x65\xd9\xa2\xef\x5e\x01\x6e\xf5\xa7\xc0\x30\xec\xa2\xef\x5e\xd3\x91\xfa\xd7\xde\x55\xa8\xfd\x3a\x8f\x87\xb3\xa0\x6a\x3d\x90\x38\x75\x25\xc9\x5c\x0a\xcc\x6d\x76\x68\x2d\x95\x0c\xa5\x11\x41\xe3\x08\x3e\xaa\x75\xab\x0c\xe2\x32\x80\x79\x48\x14\x8d\x54\x83\xf8\xeb\x1f\xdc\x6a\x9c\xa8\xea\x67\x08\xbb\x49\xdd\xa6\xcb\x54\x62\xfb\xcb\x95\x6a\xb2\xf9\x1e\x11\xa6\x04\xd5\xb2\xae\x71\xbe\x08\xdd\x46\x26\x4d\xf7\xc8\x6c\x4c
\xd8\xe8\x4d\x69\xdf\x1d\x5d\x10\x01\xf5\x03\x4e\xcd\x85\x91\xc9\x40\x68\x61\x32\x17\x05\x1c\x16\x2d\x2d\x1e\xce\x89\x84\x0f\x89\x56\xfb\x9c\x26\x20\xfb\xde\x2e\xcc\x08\x77\xb5\xeb\x57\xb1\x30\x89\x59\x18\x04\xbd\x9e\x3b\xef\xa1\x1f\xba\x31\x3d\x59\x50\x08\xdc\x75\xd8\x37\xfe\xc3\x19\x94\xbc\xe9\x32\x3a\xee\x08\x85\x88\x7a\x43\xed\xf5\xac\xa6\xef\xba\xeb\x4d\x99\xaa\xbd\xc9\x7a\x6f\xd2\xdc\xa5\x8b\x3e\xab\xf6\x8b\x91\x49\x13\xc5\x6a\x4a\x5c\xe2\xb4\x15\x31\xbd\x93\x64\x4e\x5c\x9c\x3f\x9c\x6a\x65\xa3\xdb\x7c\xad\x3f\xf8\x43\xe4\x86\x87\xe7\x78\x93\xd4\xe8\x0b\xa9\xc6\xc7\xfb\x6e\x75\x84\xc5\xab\xbe\x5b\x20\x6a\x5e\x91\xb0\x8d\x57\x60\x6e\x45\xa8\x8a\x72\xe6\x2a\xaf\x62\x80\x4c\x5f\xa6\xf1\x12\xf4\x0f\xc9\x9f\x59\x1f\xef\xcd\xfa\xec\x7b\x01\xb0\x18\x26\xc6\xcd\xbf\x98\x5b\x64\xb8\xe0\x39\x6c\xa9\x29\xeb\xeb\xfb\x33\x4f\x37\x5f\xd4\x1d\x26\xab\xfd\x3c\x7c\x58\x3c\xe6\x9d\x26\xd8\x69\xe8\x24\x26\xb3\xab\x8b\xc2\xb8\xfe\x0c\x03\x56\xfd\x18\x6e\x46\x82\xc6\xd4\x89\x44\x41\x11\x1f\xb2\xfc\xc1\x83\x21\xdf\xfa\x23\x1e\xcd\x48\xea\x82\xe8\x6d\x03\x53\x92\x47\xe4\x42\xbc\x31\x34\xbf\x31\x39\x7f\xbe\x54\xb9\x28\x55\xb8\x50\x1e\x92\x3f\xb9\x99\x45\xa1\x04\xa4\xe0\xbb\xed\xdc\x4b\xe4\x6a\x9b\x0c\x54\x94\x36\x6d\x79\x61\xa4\xb1\x4a\x33\x75\x66\x65\xd9\x77\xb5\x9d\x95\x0b\x7c\x43\xf1\xd6\x81\x0b\xbc\x87\x30\xa8\x4f\x49\xb7\xf5\xf3\x3a\x9d\x4d\x8c\x96\x46\xaa\xe6\xd8\x75\xde\xc3\x65\x3b\x09\x34\xa2\x6c\x3e\x92\xaf\x42\xa7\x79\x6e\x8c\x8b\x60\x2a\x3d\xb8\x20\x3a\xcf\xf9\xc3\x87\x17\x84\x1b\x2e\x05\x75\x6b\x6b\x16\x65\x4f\x02\xde\x1a\x9b\x75\xb9\x70\x9b\x44\x6e\xcd\xf4\xd0\x42\xf1\xa8\x07\x39\x46\x9f\x38\xae\x5d\xa3\xba\x2e\x39\x45\x79\x65\xe2\xc2\x8c\x7c\x90\x30\x04\x49\xe1\xad\x49\xe6\x9d\x39\x68\x69\x0e\xc2\xd1\x59\xe8\x32\x90\xf4\xd1\xe6\xaf\x01\xc5\xe3\x3b\x23\x87\x09\xcc\xc8\xd4\x05\xa1\x95\xbc\xcd\xfd\x65\xd8\x9f\xd6\xdc\xc7\xce\x57\xa1\xbe\x9e\x39\xf1\xa3\x4c\xd1\x9c\x47\x12\x4c\x9e\xa1\x3c\xcd\xf4\xa5\x4b\xb8\xe1\x2b\xb9\x84\x98\x93\xfe\x63\xad\x4a\x06\x4c\x7c\x78\x23\x91\x93
\x2d\xcd\x2c\x46\xc3\x47\x5b\xbf\x91\x6c\x43\x6e\x90\xb8\x27\xe8\xa3\x98\xda\x83\x05\x85\xd9\x3a\x64\x95\x23\x87\x5c\x58\x7f\xc0\xfb\xac\xef\xfe\xfa\x55\x47\x08\xf9\x31\x66\xb0\x7c\xcf\x8d\xb6\x47\xea\x39\xf7\xda\xc8\x14\xa7\x1a\x3b\x1e\x08\x87\x83\x74\x76\x89\x54\x58\x9f\x29\x1a\xcf\xa3\x94\x6b\xac\xf4\x15\x25\xcb\xa2\x73\x73\x43\x0a\x52\xf1\xb4\x0b\x0a\x73\xb7\x49\xff\x78\x42\x62\xda\xbf\x4a\xc2\x18\xc9\x03\xdc\xb4\xc1\x84\x95\x2b\xc7\xd5\x5f\xa1\xc8\xbb\xde\xfd\x51\x6c\xcf\x5d\x32\x27\x33\x57\xef\x82\xde\x5b\x83\x41\xaf\x5b\x30\xe8\x72\x67\xb4\xd0\x64\x37\xc0\x54\x3e\x9d\x8d\xfe\xe6\xe3\xdf\xc6\x9c\xcc\xc8\x8d\xa1\x51\xbf\x91\x61\x0f\x35\x11\x3b\xf7\x1c\xf3\x20\x1f\xf2\xd0\x9e\x93\x6b\x8d\xe9\x67\xe4\xc2\x2d\xbc\x9d\x8f\x5c\xe7\x15\x1c\xbb\x8d\x57\x4c\x79\x38\xb2\x82\xef\x79\x5a\xc6\x11\xe3\x55\x4a\xca\x2b\x94\x54\x13\xec\x4b\x5b\x2a\x16\xe0\xbe\x54\xb5\x16\x4d\x64\x4e\x42\x3d\xd4\x7d\xc2\xe1\x27\x3b\x57\x9d\xa9\xe5\xe7\x7d\x76\x0e\xde\xd4\xde\x25\xbc\xef\x61\xdc\x16\x34\x21\x53\x42\x92\x1b\xd9\xa7\x8b\x65\x3e\x09\xcb\x9e\xaf\xeb\x32\x2b\x7a\xcc\x59\xa5\x8c\xaa\xc9\x4c\x6b\xfd\x55\x32\xda\x61\x73\x77\xac\xa7\x57\xf2\x40\xdb\xb5\x1a\x6c\x4b\xd5\xd0\x7e\xbb\xd5\x7a\x8f\xeb\x3d\xbd\xae\xf7\xb4\x55\xef\x69\xbf\x5e\xa3\x30\x52\x67\x4e\x3e\x19\x35\x55\x86\x29\x46\xf7\x09\x83\x9f\x6e\x64\x7b\x18\x60\xe4\x9c\x8e\x4e\xed\x19\xe9\x72\x0c\xeb\xa6\x5f\xb0\xbe\x1b\xd9\x88\xb8\x87\x03\x9d\xad\xe8\xc8\x85\x1d\x57\x57\x87\x12\xcf\x37\x4a\x12\x9f\x50\x6f\xa9\x56\x68\x01\x6e\x46\x21\x55\x9f\xf1\xf6\xf4\xd5\x54\x75\xb6\x2b\x81\xe7\x38\x72\x7c\xb9\x02\xea\xb5\x6b\xec\x26\xce\xda\x00\x19\xfd\x02\xc1\xfb\xa2\x6f\x2d\xae\xdc\x75\x0e\x6d\xde\x4b\x5b\x80\xf7\x5e\xd5\xff\x64\x4b\xf0\xb6\x6c\xae\x1a\xbd\x74\x9d\x2f\x02\xce\x5d\xe7\x51\xff\xbf\x1f\xc1\x9e\xeb\xbc\x10\x64\xf8\x68\x40\x61\xff\x9e\xf0\x5e\x09\xc0\xb7\x85\x86\x7e\xac\xdf\x1d\x71\x5b\xef\xdc\x0b\x1b\x11\xec\x41\x06\x12\x43\x21\x54\xb7\xa8\x0e\x30\xda\x53\x1b\xd7\x3d\x91\xa0\xa6\x66\xa1\x44\xec\x7d\xb1\x2d\x6b\x01\xf9\x6e\xf0\xbe
\xfb\x42\xad\x63\xe1\xbc\x8d\x58\xd7\x1c\xa0\xd7\xe6\x00\xe5\x52\x52\x71\x80\xee\x0d\x5a\xe5\xb1\x60\xcf\x57\xc6\x97\xdd\x3d\xbc\x1c\x58\xda\xf3\x86\xbb\xc9\x42\x81\x52\x46\xa9\x99\x0b\xa5\x8d\x90\x9b\x0f\x11\xfd\xd3\xd7\x1c\x23\x77\x6f\xf5\x78\x34\x8d\xdf\x38\xbd\x9b\xee\x9a\x71\x92\x7b\x6b\x23\x52\xcc\x8c\xa8\x90\xd1\x05\x06\x63\x6a\x81\xd4\x05\x85\x0f\xeb\x25\xfa\x3c\x5f\x67\x6e\xe9\xb4\xeb\xb6\xa8\x7b\xf3\x10\x4c\x2e\x43\xc1\x9d\xed\xa1\xe4\xce\xba\x28\xba\xbb\x3b\x10\xa8\x5f\xef\xc0\x75\xb2\x0d\xeb\xbf\x1e\xe5\xaa\xb5\xd4\xe1\x2e\x71\x8b\x04\x7c\xab\x6a\x47\x9c\xbc\x42\xb6\xef\x8c\xbc\x55\x2c\xd2\x00\x37\x3a\xc2\x5b\x23\xbf\x21\xaf\x67\x54\x8b\x0f\x15\x3b\x11\x5a\x99\x23\xf2\xf0\xf6\xec\x57\x26\x09\x29\x86\x80\x44\x86\xa4\x75\xc3\xc3\x05\xb8\x92\x9a\x5c\xa5\x68\xc2\xd9\x96\x44\xa7\xc4\xc4\xef\x12\xb8\xd0\x2a\x56\x0d\x6b\x99\x76\x52\xb6\x16\xc8\xa8\xb9\x0a\xca\x5e\x04\x77\x7c\x14\x4b\x5d\x09\x97\x01\x69\x45\x54\x8a\xc7\x40\x54\x85\x6c\x52\xc5\x79\xe9\x9c\x78\x48\xb7\x45\x9d\xf0\x9c\x93\x28\x01\xb6\x1a\x5a\x33\xc4\xb7\x2b\x90\x12\x67\x24\x52\xb3\xc0\xb0\x58\xf4\xf9\xca\xb6\xf8\x6a\x04\x7b\x76\x04\x2e\xb3\xfd\xbe\xbb\x8c\x21\xd4\xc2\x4c\x9d\x19\x89\x5d\x90\xc0\x28\x74\x9d\x19\x91\xea\x39\x59\xe9\x69\x39\x9c\x84\xec\xb3\xcb\x51\x60\x87\x18\x55\x02\x7f\x84\x89\xed\x01\xeb\xda\x5d\xf5\xa1\xa9\x82\x5c\xa4\xcd\x58\x36\x23\xfb\x66\x03\xbf\x71\x10\x99\x3a\x1d\x07\x01\xca\x86\xd4\x2e\xcb\xbe\xe6\x65\x6f\x4c\x19\x94\x65\x59\x86\x65\x9f\x03\xc2\xab\xd4\xba\x72\x98\x85\x3e\x72\x42\x1d\xb9\x94\xec\x13\x09\x3f\x31\xab\x2d\x1a\xe7\x96\x11\xcf\x9e\xae\x6d\x71\xd4\xd0\xe2\xf7\xb5\x2d\x8e\x1b\x5a\x0c\xeb\xa7\xc3\x3d\xa9\x9f\x8e\x6d\x5c\xfa\xb1\x33\x23\x1e\xda\x41\x41\xa2\x16\x7f\x8c\x67\x65\xdc\x77\xe1\xd2\x19\xf7\x3d\x98\x38\xe5\x0a\x48\xbd\x02\x97\x14\xc6\x4e\x4a\x66\xe4\x83\x0b\xdd\xbe\x7b\x8d\xeb\x34\x71\xfc\x51\x85\x15\xc2\xf1\x8d\x29\x4c\xbe\x53\x6a\x4f\xea\x1b\x89\x5b\x74\x82\xcc\xf4\xa4\x40\xdb\x13\xe7\x86\x1c\xbb\xe0\xa2\xbd\x23\x53\xa0\x70\xe9\x4c\xaa\x39\xc6\xf5\x3c\x98
\x3d\x75\x26\x38\x93\x72\x60\x3c\x1f\x58\x81\x1d\xa7\xce\x0d\xd9\x2f\x3b\x0b\x55\x67\xd3\xd5\xce\x76\xec\x69\xad\xab\xac\xd6\xd5\xb3\x3a\x96\x79\x53\x5f\xc7\xa1\x46\x33\x17\x55\xd5\x6a\x09\xb1\x68\x54\x76\x01\xa2\xcf\x5e\xd3\xd1\x8c\x7c\x2b\x7e\xd8\x33\xf2\xa5\xf8\x01\xde\x0f\x8d\xb3\xbf\x06\xe4\xc1\xb0\x82\x8d\x2f\x11\x79\x5d\x38\x5b\x8e\xe3\x90\x0b\xe7\x9c\x04\x6a\x68\x8a\x00\x3f\x7c\x78\xd1\x77\x47\x88\xa2\x3e\x28\xc1\xe5\xab\x20\x17\x48\x80\x29\xb5\xc7\xc9\xea\x40\x2e\xd4\x37\x2e\x8b\x43\xf1\x35\x20\x97\x6a\xde\x49\x02\x2c\x83\x2b\x4e\x2e\x8d\x62\x67\x41\xe1\xd4\x75\xe6\x31\xbc\x73\xab\x4a\x6c\x0c\x56\xf8\x8a\xcd\x3f\x06\x5f\x39\xbf\xb6\xe0\xd4\xa5\xf0\xb1\x4d\x58\x8d\x81\xb7\xa7\x51\xce\x23\x17\x2e\x5b\xb1\xe4\x71\x0b\x97\x93\xe6\x94\x21\x0b\xcb\xe4\x39\x35\xf4\x33\x30\xe9\x93\x57\xbf\xc7\xeb\xc1\x78\xcd\x87\x6b\xd1\x78\xcd\x87\xeb\xb7\x43\xc5\x97\x1b\x70\x9a\x77\x63\xf3\x62\x25\xf5\x1a\x4a\xb5\x86\x6f\x38\xd1\xde\x0b\x4c\x95\x53\x0a\xef\x5c\x52\x18\xb0\x1d\xae\xe5\xdd\x4c\x5e\xad\xaa\xd3\x0e\x86\xef\x70\x8d\x99\xc9\x17\xb1\xd6\xa4\x3c\xd5\x6e\x05\x0b\x0a\xb7\xae\x33\x23\x5f\xb4\x6a\xd0\x68\xb7\xdf\xbd\xb2\xbe\x53\xf4\x1d\x7a\xe5\x3a\xd7\x31\xbc\x6d\x25\xe8\xef\x25\x89\x1f\xa1\xd8\xff\xc6\xbd\xbf\x73\xa1\xd8\x88\xeb\xee\x2c\xda\x3a\x81\xf7\xdd\x4f\x18\x60\xa0\xac\xc9\xb5\x37\x17\x08\x47\xe4\x7e\x83\x07\xad\x60\x54\xa5\x96\x6f\x34\x92\x9a\x91\xb7\x2e\xc4\x98\x30\xfd\x09\xdf\x36\xc1\x48\x3e\xb7\xdf\x4e\x30\x32\x23\x07\x46\xce\xeb\xb3\xaf\x0b\x0a\x5f\xdc\xf6\xab\x97\x59\x08\x9b\xdb\xfa\x1b\x45\x33\x78\x32\xc0\xfd\x7b\xb1\xbe\xdd\x90\x6f\xe9\x71\xe1\xcd\xd4\xfa\xba\x4f\x06\x65\xf7\xaa\xfa\xa7\x56\x65\x7b\x6d\xfc\x93\xff\xbf\x24\xea\xa7\xf0\xde\x6d\xbf\x29\x1f\xfc\x11\x8f\xe6\xe4\xbd\x0b\xf1\xff\xf9\x3f\x43\xd8\x25\x42\x6d\xc3\xf0\x61\x3c\xda\x25\x12\x04\xb5\x25\xb5\x95\x80\xf5\x75\x8d\x16\xeb\xbd\x16\x73\x51\xf1\x0d\xdf\xd6\x7c\x6b\x97\xcc\xc8\x57\x17\xe2\xde\x98\x13\x49\xe1\x08\xf5\x53\x1a\xac\xe2\xb4\xbd\xff\x6f\xaa\x7f\x6b\x80\xa9\x3e\xf4\x26\x8a\xf4\xee\x3d\x2f
\x60\x77\xc8\xb7\xb0\x91\x4c\xef\x07\xb9\xee\x96\x82\x40\x71\x4f\x27\x7a\x9e\x3a\x33\xf2\x42\xc0\x00\xd3\x4c\xa5\x8e\x64\xc0\xd4\xab\x93\x00\x4e\x5d\xc8\x52\x0a\xde\x9a\xa9\x9d\x4b\x20\x0a\x87\xb7\x5e\xf8\x4d\xb2\xf4\x92\xe8\x4b\x5a\xb1\xa0\x14\xfe\xfc\x9e\xfb\x15\xab\x5d\x49\xd2\x7b\x46\x67\x09\xd3\xf6\x5d\xd1\xf6\x17\x9f\x3e\x1e\x19\x03\x0c\x6d\x6e\x0d\xc1\x6a\xdf\x71\xfd\xe6\xbc\xc8\x98\xa2\xb3\xa4\x7c\x45\x03\xd7\x73\x62\x99\x5b\x9d\xd4\x82\x19\xf1\x52\xf8\x96\x01\x11\x8e\xa0\x7d\xf7\x5a\x5f\xb1\x95\x1e\x4c\x2c\x25\x02\xf3\x7d\xaa\xd7\xb9\xe7\x13\xbe\x64\xa7\xfa\xa5\xce\x6c\x82\xf1\x7f\x4e\x5d\xf5\xfe\xc4\xbc\x4f\xc6\x63\x1e\xcb\xfc\xed\xb1\xb9\x41\xf4\x2d\x9d\xda\xfc\x95\xab\xd3\x9d\x9f\xe2\x75\x98\x6f\xb2\x09\xa1\x07\x6c\x0a\x16\x9b\x4c\xa2\x50\x07\xc0\x7a\x74\x95\x26\x98\xa9\xf2\x75\x4d\x27\xf9\x36\x20\x73\x12\xa6\xb8\x22\xb7\x5a\x6f\xec\xb6\xae\x37\x3b\x56\xbc\xca\x31\x30\xe4\xfd\x4e\x60\x6a\x0b\x48\xed\xb8\x9f\x02\x3b\xb5\x31\x3b\xbe\xeb\xdb\x98\x56\xc6\xfd\x6c\x63\xd2\xfc\x3d\x25\x6b\x2f\x28\xa4\xad\xe0\x11\x84\x04\x21\x78\x83\x69\xef\x3b\x0a\x51\xea\x34\x8a\x27\x52\x84\x63\x42\x17\xe0\xa7\xc6\xa1\x68\x66\xc1\x93\xc1\xf6\xef\xfc\x31\x2e\x8a\x6f\x01\x06\x5c\xc0\x1f\x97\x16\x6c\x3d\x31\xcf\x63\x4b\x63\x76\xb5\x25\x16\x1e\x94\xef\x14\xa6\xeb\x0f\xd7\xd3\x15\x3f\x3b\x7c\x59\x71\x99\xfb\xf5\xeb\xe9\x42\x0d\xfb\x69\xef\x3e\x96\xb5\xff\x89\x94\x7d\x66\xf9\xba\x69\x0b\xf5\x63\x8c\xc4\x74\x34\xb4\x07\x45\x38\xe3\xbb\xec\x7f\xb7\x86\xb5\x61\x3e\x7e\xb6\x21\x6b\x23\x7d\x36\xc8\x5f\x15\x83\x1d\x6e\x16\xef\x8a\x01\x0f\x1f\x0f\xf3\x77\x05\x6d\x18\xfe\x5e\xbc\x2b\xe8\xc3\xe6\x70\x33\x7f\x57\xd0\x88\xcd\xed\xad\xfc\x5d\x41\x27\x36\x9f\x16\xef\x2a\x81\x1c\x06\xdb\x1b\xb2\xbe\x32\x5b\x5b\xdb\x1b\x68\x89\x31\x4e\xef\xe6\x52\x2e\x5b\xeb\x78\x6a\xf5\x36\x66\xa4\x8b\x67\x85\x6e\x28\xe6\x7a\x92\xb6\x92\xd9\xfa\xd2\x6e\xd5\x21\x40\xef\xc6\xe6\x33\x7b\xf3\xf7\x3a\x2c\xd4\x69\xee\xd6\xa0\x4e\x74\x87\xf5\xd0\x1b\x83\x7c\x69\xeb\x91\x34\xea\x51\x33\x06\xf5\x08\x19\xc3\xd5\x45\x5c\x59
\x41\xa4\xaf\x17\xe9\x9d\xb7\x14\x33\x32\xd1\xab\x03\x99\x93\x30\x63\xea\x39\xdc\xfc\xc3\xc9\x7e\xfd\x3a\x46\xcf\xb4\xf2\xda\xf5\x27\xfb\x6a\x4b\x60\x13\x5b\x80\xbb\x65\xc7\x0b\xc3\xc4\x85\x8c\x64\x1b\x43\x0a\xd2\x91\x3d\x64\x58\x67\x69\xfb\x7d\xd7\x8c\x8c\xcd\xf7\xf0\x7c\x0a\x73\x19\x35\x4f\xb5\xf5\x92\xcb\x9a\xf5\x1c\x6c\xb2\xa0\x70\x63\x2a\xcd\xd3\x86\x4a\x24\xc1\xdd\xde\xa4\x8f\xb6\x7e\x29\xa1\xf2\xba\xe5\x4c\x01\x77\xc4\xf3\xb6\x33\x2f\x9d\x00\x49\xb3\xc7\x88\xa4\x1b\xc3\x15\x7f\x86\xcb\x14\x74\x05\x22\x9d\x9b\x54\x3d\x84\x8c\x6c\xfd\x26\x7b\x9b\xb4\x21\xc8\x75\x51\x7b\xae\xaa\x16\x72\xe9\x32\x60\xf0\xde\x8c\x4c\x53\xc4\xf0\x35\x08\x31\x25\xc3\xa2\xe4\x49\xad\x64\xb3\x28\x79\x5a\x2b\xd9\x2a\x4a\x7e\xaf\x95\x6c\x17\x25\xcf\x6a\x25\x8f\x8b\x92\x12\xb0\x4c\xd1\x13\x55\x54\x83\x30\x24\xdf\x3b\x6b\xc8\xf7\x31\x9a\x7f\xfd\x31\x18\xc5\xf6\xe0\x8f\x63\x64\xda\x46\x02\x79\xb6\xa3\x35\xad\xea\x67\x76\x4e\x76\xd4\x12\x94\x60\xaa\xe3\x86\xac\x61\x5d\x8e\x52\xc0\x64\x9b\x06\xc8\x3e\xb9\xf9\xd3\xe7\x92\xd9\x7e\x9d\xae\x73\x24\x52\x64\xe8\xb7\x78\xe3\x09\xdf\xfe\x4d\x60\xa0\x29\xb9\xa1\x44\xbd\xb3\xd6\x8f\xde\x90\xd7\xea\xa3\x5f\x8a\x4f\xfd\x28\x9e\x44\x31\x90\x17\xe5\xe7\xaf\xda\xf0\x3d\x92\x55\x43\xaf\x48\xaa\xb8\xbf\x63\xdd\x5e\x2d\xc9\x99\x7e\xec\x49\xfa\xe8\x09\xdf\x46\x20\x7f\x99\xb6\x5e\x58\xa7\x48\xa1\x25\x64\x8a\x37\x52\x2d\x83\x90\x70\xe4\x34\xd4\x43\x4f\x4d\x2e\xa3\x68\x1f\x71\x95\x16\x66\xc5\x17\x24\x83\xf2\x12\x51\x3c\xe7\x65\x65\x59\xf1\x3c\x95\x60\x5a\x71\x4a\x47\x1c\x1d\x36\xce\xd7\xec\xe9\x9c\xbc\x4c\xf1\xb2\xed\x3a\x45\xdb\xd3\xe3\x14\x8b\x29\x0c\xd0\x9e\xbb\xa9\x65\x3d\x29\x55\x71\x28\x72\x02\xb2\x6d\xd7\x3b\x37\xbd\x42\xb9\x19\xf8\x13\x37\xc3\x3c\x09\x53\x65\x50\xa8\xc8\xb6\xfe\x7a\x37\x83\x4a\xf3\xcd\xfb\x36\x1f\x2c\x35\x1b\x96\xcd\xce\x53\x18\x0e\x75\x10\x85\x65\xf4\x81\x45\x9b\x95\x92\xc1\x52\xc9\xa0\x52\x52\xeb\xae\x52\xb2\xb5\x54\xb2\x55\x29\xd9\x5e\x2a\xd9\xae\x94\x3c\x5e\x2a\x79\x5c\x29\x79\xb2\x54\xf2\xa4\x52\xf2\x74\xa9\xe4\x69\xa5\xe4\xf7\xa5\x92
\xdf\x2b\x25\xcf\x96\x4a\x9e\xe9\x92\x7a\x98\x7f\x9c\x90\xf1\xb4\x5c\x50\xd8\x6f\x87\x77\x64\xaa\x23\x56\xf2\x7b\x3f\xd9\x95\x2d\x80\x5d\xdb\x79\x89\xc2\xeb\x37\x9a\x80\xc5\xc0\x04\xaa\x90\x6f\xd5\xe3\x8e\xd6\xb3\x7c\x40\x21\x69\x37\x5d\x1b\x2f\xf2\xd8\x3e\x44\x03\x07\x76\xa2\x1e\x62\x0a\x53\x7b\x4e\xf6\x53\x40\x7f\x63\xb4\xef\x23\x69\x0a\x4f\x37\xf9\x63\x84\x70\x4e\x21\x55\x15\x91\xf5\xb7\x2c\xc8\x18\xc1\x48\x55\x94\x2a\xfe\xfb\x90\x93\x0f\xe9\x72\x0b\xaa\xf8\xf1\x1d\xa9\xb8\x71\x5d\xaa\x7a\xd9\xb3\x1f\x20\x3d\x3f\x6d\xc5\x40\x88\x38\x7a\xb1\x42\x9e\x0b\x0a\xef\xd2\x3b\x9c\x89\x8c\xf1\x6b\x25\x3e\xcd\xc7\xb4\x5d\x01\xe3\xeb\xd4\x1a\x27\xe8\x6a\xa3\x64\x09\x07\x05\x05\xcf\x41\xf9\x21\x71\x76\x33\xe2\x33\x92\xa5\x84\x69\x23\x8c\xe2\xb7\x57\x5e\x34\x98\x95\xcb\xf2\x95\xe3\xc5\xca\x99\x88\xb2\xf5\x65\x9a\x91\xd3\x54\xfb\x7c\x54\xd7\xca\xd3\x0b\xa4\x57\xbb\x58\x23\x56\x59\xa3\xeb\xd8\x61\xad\x91\x77\xab\xfb\x98\xa9\x91\x70\xd5\xb5\x54\x5d\x32\x70\xaf\x95\x5c\xfa\x59\x71\x37\x47\xb6\x07\xee\xa9\x8d\x01\x9d\xcf\xd6\x46\xdc\xbd\xb0\x39\xb8\x97\xaa\x8b\x44\xb5\x3e\x33\x41\x47\x85\x83\xc1\x2b\xc2\xf4\xf5\x8f\x8c\x45\x18\xb7\x22\x48\x10\x3e\x1e\x0c\xf2\xc8\xb2\xe9\x21\xbf\xe0\x37\x16\x04\x26\x00\xc5\x94\x45\x19\x2f\xe3\x64\x54\x63\xc5\x9e\xe9\x5b\xe5\x53\xe1\x54\x63\xcc\xde\x15\x56\x37\xcf\xc9\xf7\x97\x42\xea\xe6\x09\xfe\xfe\x7e\x3c\xdd\x96\xd8\xb6\x87\xc6\x94\xfc\x30\x75\xda\xe3\x87\x60\xc0\x91\x53\x13\x70\x44\xc9\xcc\xf9\x6a\x14\x52\x75\xf1\xa2\x14\xbf\xf3\x57\xb5\x00\x25\x2b\xe1\x48\x4a\xe9\xff\x24\x21\x9f\x04\x8e\xf4\xba\x0c\x18\x02\x27\x8d\x34\xac\x45\xb7\x60\x81\xf8\x5e\xcd\xf8\xb9\x9f\x80\x84\xd2\x32\xff\x30\xd5\x1a\xdf\xdb\xd4\x21\x3c\x71\x82\xb0\x1e\x05\x3b\x26\x3b\x31\xe1\x09\x79\xc5\x24\xef\xc7\xc9\x8c\xe8\x1b\x62\x0a\xaf\xd2\xaa\xce\xdd\x04\x92\x7f\xa9\x66\x9b\x08\xad\x73\x7f\x9b\xde\x15\x3b\x33\x77\x42\x32\x31\x79\x46\x26\x3c\xd2\xc8\x84\x77\x1a\x19\x37\xb1\xd1\x4c\x92\x29\x31\x5e\x3d\x7d\x06\x1c\x6d\x08\x19\x26\xae\x41\x6f\x1a\x66\xbc\x6a\x32\xf3\x97\x9b\xbf\xd2
\xfc\x35\xc8\xe3\x85\x50\xe2\xfd\x0d\x39\x53\x67\xd7\xb2\xe0\xc1\xd0\x40\xba\x12\xdb\xdf\xa4\x8e\x3a\x6e\x96\xa5\xce\x9b\xfa\xf7\xd4\xd6\x89\xe2\x0d\xa2\xbb\xb6\x5f\x08\x75\xec\xf0\xe5\x02\x0e\x9a\xd4\x3b\xe7\x0a\xa1\xc4\x14\x7c\x54\xae\xea\xc0\x39\xe8\xe6\xa6\x93\x2c\x94\xd7\xa1\xcf\xf1\x7e\xb5\xbc\x84\x51\x9c\xa7\xc2\x8a\x78\x2b\x62\x74\xb3\x0a\x01\x5a\x2f\x59\xfc\x0f\xd9\x71\x79\x27\x8c\x31\xef\xd2\x84\xa5\xd2\xd2\x08\x11\xb5\xc9\xcd\xe8\x10\xaf\xae\x7d\x40\x53\xbc\x63\x6d\x8d\xae\xd3\xf7\xb8\x9f\xd1\xe0\x94\x9d\xe6\x1f\xde\xcd\x48\x40\xde\xa6\xb0\xd6\xeb\x69\x9f\xbc\x49\x41\x2d\x8e\x50\x6b\x83\x81\xc0\xb3\x65\x5c\xc4\xf1\x3a\xfd\x12\x27\xdf\x65\x44\xf6\xdd\x33\x7c\xe0\xf8\xa0\xb0\x2c\x3e\xcc\xc8\x41\x0a\xea\x11\x84\xfa\x8d\x90\xf7\xa5\x95\x08\x1c\xe3\x2f\xe4\xcc\x15\xeb\xf6\xa2\x9d\xa8\xca\x56\x49\x69\x4e\x5e\x28\x56\x63\xb8\xf9\x9b\x00\x5e\x33\xaa\x70\x99\x42\xef\xcc\x19\x6e\xfe\xa6\xc6\xb7\xd5\x1b\xd2\x0d\x25\x9e\x65\x7d\x36\xa1\xea\x87\x00\x4f\x49\x8d\x5a\x67\xbe\x09\x8c\x2a\xf1\x91\x39\x28\x1b\x32\x18\x6e\x2a\xd1\xab\x94\x07\x98\x91\x07\x14\x31\xd8\x98\x91\x2f\x6a\xa6\xec\xab\x16\x09\x90\x40\xd4\x38\x27\xbe\xf1\xf4\x37\x51\xc7\x58\x7c\x03\x11\xf3\x8f\x66\xea\xbe\x12\xbc\x6e\x85\xef\xd4\x3a\x2f\x4e\x37\x0a\x21\x69\xdb\x8e\x9d\xe1\x63\x10\x0e\xc6\xb9\x35\xc1\x66\x1a\x22\x92\x6c\xe5\xf5\x50\x9e\x68\xaf\xb7\x99\xd7\x43\xf1\x63\x4d\xc5\xe1\x12\xd3\x29\x01\xb7\x62\x2b\xe7\x3e\xd5\x7c\x14\x40\x9c\x99\xc7\x15\xb6\xb2\x6c\x33\x5c\xdf\x66\x60\xc7\xce\xa6\x9a\xdf\xe6\xba\xd1\x98\x4a\xeb\x96\x00\x57\x60\x08\xc2\x79\xda\x5c\xa9\x8c\xd2\xb2\xa6\x16\x92\xd5\x35\x12\xd8\x8f\x54\x81\x52\x0c\x3c\xd5\x36\xab\xef\xef\xa8\xbb\x55\xa9\xfb\xb5\xfd\x04\xcc\xc9\x5e\x9a\x47\x39\x2b\xe4\x1f\x0e\x92\x8e\xa4\x8d\x1d\xc5\xb8\x8c\xe8\x5c\xf9\xad\x01\x75\xf5\x1c\x14\x02\xf0\x4b\xcf\x5b\x07\x1b\x47\xcd\x2d\x7f\x34\xb6\xac\x0e\x5d\xd4\x5a\x66\xb9\x99\x49\x83\xe2\x27\x0f\x81\xcb\x97\xcc\x43\x0e\x5a\x3c\x65\x74\x0a\x84\x45\xc5\x9b\x39\x1e\x9d\xab\x33\xbc\x03\x5c\xfd\xb9\xa6\xd4\x3e\x27\xf8\xb8
\x43\x01\x5f\x2c\xb4\x40\x39\x09\x40\x63\x78\x3c\xf0\xcf\xdb\x7d\x46\xf6\x49\x06\x3f\xd9\x44\xb1\x73\x13\xa6\xfd\x22\x6b\x12\x4d\xa5\x06\xee\xc4\xa6\x9a\xb8\x5e\x85\x21\xf4\x86\xea\x57\xf9\x5e\x6a\x12\x5a\x83\x78\xdd\x85\x28\x0d\x24\x6a\x02\x50\x5e\xbe\x23\x17\xb4\xea\x8f\xc8\x9b\x6c\x9e\x2b\x1e\xc6\x17\x18\x55\xbc\xb0\xcc\x7e\x5e\x22\x0a\x22\x94\x20\x8f\xe5\x98\xff\x85\x11\xf5\x12\x1f\x70\xb0\x43\x1c\x77\xec\x28\xd6\x1e\x59\xf6\x1e\x32\xfe\x62\x43\x7b\x86\xb2\x56\x26\xbf\xdf\x1d\x09\xed\xda\xc1\xf1\x22\x54\x6b\x13\x3c\x67\x25\x5f\x53\xd6\x67\xa2\x16\x5e\x0e\xb7\xee\x56\x6d\xc9\x55\x25\xc5\x37\x28\xda\x5a\x29\x28\xfc\xc7\x58\x1e\x08\xa0\xc8\x64\x54\xf4\xa3\x13\x9b\xef\xc8\x7c\xf2\x4b\x04\x37\x2f\x46\x0e\x9d\x2e\x8c\x2b\x7e\x6b\x0f\x3a\xbd\x78\xc5\xb4\x7e\x3f\x04\xbd\x76\x42\x2f\xce\x05\xc2\x05\x1d\x61\x23\x2d\x61\x09\x04\xbc\xa5\xcf\x10\x0a\x09\x86\xfc\xf2\x1c\xaf\xbc\x30\xd6\x3b\x7b\x65\x7b\x5a\x0a\x54\x12\x00\x82\xa9\x12\xfb\x12\x25\xf6\xa9\x77\x09\x20\x00\x2f\x0a\xe9\x57\xbb\xbd\xab\xb5\x09\xd0\x69\xc9\xc5\x28\x03\xed\x12\xd5\x1f\x83\xd1\xc0\x46\x52\x66\xac\xe5\x13\x67\x4e\x04\x84\x10\xb4\xdd\x9e\xc5\xa3\x19\x89\x23\x98\x91\x14\x9e\x0c\x00\xa3\x7e\xdb\x33\xf2\x2d\xc5\x37\x9b\xdb\xfa\xcd\xa2\x64\x2e\xf5\x3c\xae\xed\xa4\xef\xaa\x51\x27\x18\x89\xbf\xc8\x66\x85\xc3\x0b\xf4\x37\x2b\xc3\x5e\xf1\xcb\xc9\x4f\xbf\x5c\xf6\x5b\xac\x2d\xb6\x62\x87\x91\x2f\x28\x62\x73\xcc\x48\x3c\x7a\x9f\xda\x9f\x52\x70\x35\xc0\xad\x8c\x2b\xd0\xe3\x0a\x8a\x48\x60\x51\xbb\x2e\xa8\xce\x99\xce\xf0\x03\x39\x07\x4a\x9b\xb8\xcb\x33\x13\x67\x91\x47\xce\x1b\x01\x59\xe4\x1c\x08\x60\x51\xeb\x85\xd7\x91\x2d\xfa\xec\x08\xcd\xf5\xfb\x2e\x05\xcf\x16\x18\xb9\x4c\xe8\xa0\x67\xe0\xbe\xb5\x85\x8e\x5c\x26\xfa\x4c\x8d\xd6\x8b\x9c\xb7\x02\x92\xc8\x79\x25\x20\x8c\x9a\x0e\xfd\x9c\x24\x11\x0c\x07\x43\xdc\xe0\x5f\xbf\xf0\xe7\x93\x67\x5a\x2f\x97\x4b\xc9\x18\x7d\x0b\x4b\xb6\xb7\x30\x3a\xad\xa9\xb7\xfd\x18\x7f\x8d\xe4\xc6\xd0\x96\x78\xcd\xe7\x45\x3a\x7a\x6d\x45\x9b\x26\xe8\xa8\x27\xca\x35\x8f\x17\x14\x82\xa8\x5d\xc3\xa9
\xba\x7d\x82\x9f\x1f\xcd\x48\xa8\x00\xc9\x8b\xb4\x87\x8c\x01\xa5\x30\xca\x61\xd1\x8d\xd6\x1a\x62\x72\xf4\x28\xe1\xda\x56\xa3\x29\x0a\x3e\x3a\xc9\x7d\x4d\xc0\x04\xb4\x7d\x2e\xaa\xa8\x4e\x0d\x9d\x99\x28\x59\x3a\x91\x84\xa4\x7f\x14\x0d\xe2\x6a\xa4\x2c\xed\x74\xc1\xa2\x22\x72\x6a\x1a\xad\x8f\x50\x59\x4d\xae\x61\x0a\xf1\xa2\x31\x6a\x15\x7d\x74\xc4\x7f\x25\xbe\x24\xce\x8c\x04\x11\x78\x3a\x01\x67\x18\x90\xa4\xb4\x3e\xc9\x67\x75\x43\xd2\x08\x18\x02\x42\x9f\x1d\xf5\x48\xb2\xc1\x30\x5e\x3d\xb0\xbe\x47\xf3\x74\x19\x7d\x17\x92\xe6\x38\x65\x88\x7b\x75\x2d\xaf\x52\x67\x4a\x5c\xb5\xf4\x3a\xf8\x89\x1a\xd5\xf3\xc2\xcf\xab\x79\x5d\x99\x5a\xa6\xe7\x99\xd9\x88\x24\x24\x18\x99\x0c\x3f\xac\xa5\xb0\xe7\x95\x40\x31\x1c\x23\x32\xd4\x5b\x17\x8b\x9c\xa1\x78\xa6\x97\x39\x01\xed\xc0\xde\x6a\x2e\xd0\xe1\xce\x4f\x76\x80\xa7\x8c\x02\x3b\xb3\x63\x60\x9b\x4a\xfa\x61\x92\x82\xeb\x9a\xf7\x6e\xa0\x5e\x71\x49\xc1\x7b\x63\x0b\x70\x27\xfa\xfd\xa2\x9e\x6b\x42\x83\xe4\xef\x45\x94\xb7\x8a\xa9\x2b\x9e\x0b\x51\xf2\xf3\xfa\x2c\x6d\x0e\x74\xda\x3c\x3a\xc2\x15\xe3\x7d\xef\x0d\xf0\xbe\xeb\x62\xc0\xa7\x2c\xd2\x16\x7b\x14\x62\x35\x39\x55\x1f\x55\xa4\xab\xf5\x27\xc8\xc9\xf2\x08\x7e\xaf\xb7\x78\xf6\x7b\x53\x03\x76\x90\x37\xd8\xac\x34\x08\x48\x54\xd6\x38\x53\xfd\x06\xea\x69\x13\xce\x89\x80\x41\x35\x9c\x4b\x6b\x4d\xec\x73\x38\x28\x16\x40\xb5\xd1\xe6\x50\x0b\x0a\x6f\x84\xb6\xc4\x38\x03\x25\xb2\x45\xce\x52\xd8\xbd\x98\xab\x0a\x7f\x27\x86\x5c\x64\x62\xc8\xf9\xa9\x62\x1e\x0e\x1a\x42\x55\x99\x7f\xbe\x85\x30\x80\x86\xe4\x2b\x95\xc8\xe7\x5e\xd8\x64\x3a\xfc\x32\x23\xf1\x06\xa2\x11\xec\xc9\x8f\xa0\x1b\x50\x8c\x86\x7e\x85\xdf\xce\x43\x6b\x99\x78\x50\xd3\xa8\x88\x07\x75\x20\x28\xbc\x4b\x49\x57\x12\xeb\xab\x48\xe2\x8b\x8e\x9f\x09\x34\x6a\xe8\xe8\x5c\xe9\x18\xa4\xbc\x1b\xad\xb5\x29\x67\xdc\x9e\x24\xe0\x9e\xd9\x31\xca\xc0\xe3\xc8\xc4\x58\xba\xbc\xb3\x19\xc1\xe8\xac\x28\x08\xab\x13\xa3\xb3\xf5\xda\xe3\x08\xfb\x99\xd4\x9a\xcb\x46\xa7\x66\xc3\x43\x33\xc2\x1c\xdc\x4f\xae\x9e\x65\x9f\x9d\xa2\xb8\x9d\xad\x72\x5c\x73\x22\x23\x38\x4d\x51\x14
\xa8\x71\x5e\xb2\x9f\xf6\xdd\xb3\xe7\xb1\x93\xa1\x22\x62\x29\x84\xaf\x2e\x43\x73\x52\x52\xa1\xab\x12\x7e\xa6\xf6\x8c\x74\x23\xc8\x40\xf6\x53\x54\x75\xe2\x4f\x45\x25\xdd\xcf\x15\x3e\xd9\xe4\xbe\x41\x9b\x75\x1c\xa2\xfb\x19\x87\x88\x03\x66\xff\x5f\x0c\x95\x9d\x96\x43\x65\xa7\x15\x7e\xbc\x6d\xa8\x9e\x33\x8d\xf4\x50\x49\xa6\x07\x97\xa6\xe0\xa1\xc9\x93\xfa\x2a\xae\xb9\xfd\x21\x45\x25\x54\xd3\x77\xd9\xf2\x77\xb3\xe5\xef\x6e\xd9\xf5\x26\x97\x11\x4c\xa3\x4a\x9b\xcb\x08\x0e\xd2\x72\x38\xd8\x5a\x2f\xf8\x65\x04\x7e\x6d\xc5\xb7\x97\xba\x63\x27\xfa\x9b\xb1\x66\x5f\x4e\x2a\x6c\xe4\x4a\xb5\xcb\x08\xba\xb5\x5a\x4f\x96\x6b\x1d\x2f\x75\x76\x5c\x54\x7b\xba\x5a\xad\xe8\xac\xac\xf5\xbb\xad\x9d\x71\x57\xf6\xfb\x9c\xc8\xfe\x54\x89\x6d\xf8\xe7\xba\x55\x7d\xb6\x87\x6b\x80\xf3\xc7\xd5\x29\xa5\x04\x94\x13\x96\xd8\xfc\x3d\x92\xaf\x8e\xd6\xc0\x0b\xed\xdf\xa6\x97\x52\xff\x36\x25\x71\x51\x32\x8d\xa0\x41\xc5\x2f\xc0\x68\x60\x57\xa1\xca\x73\x92\xbe\xa7\xf6\x88\xa3\x6f\xb1\xfb\xd9\x16\x4e\xd2\x67\x5a\xd5\x5f\xdc\x22\x25\x0e\xf1\xaa\xf0\xa4\xa1\x46\x4d\x58\x89\x4a\x9e\x3a\xbd\x9e\xb9\x92\xc8\x0f\xf0\xf2\x87\x8a\x7b\x08\x0f\x57\xc8\xbb\xa1\xda\x06\x6a\x45\xeb\x8d\xb5\xf5\xc7\xd1\xba\x22\x5a\xe7\x3a\x55\x75\x52\xd1\x6a\xb1\x19\xf9\x9c\x02\x3a\xbb\x88\x3e\x7b\x5c\x04\x41\xae\x59\x69\x0f\xec\x1b\x44\x58\x5d\x0a\xec\xb1\x3d\xc7\xe7\xc7\x14\xdc\x0f\x76\x9c\x11\xeb\x65\x92\x45\x7e\x27\x4e\x64\x27\xcd\xdc\x71\x28\x51\x77\xa9\x70\x2a\xe4\xa9\x93\x3b\x61\x8a\xe5\x73\x2e\x3b\x98\x44\xa0\x6f\xe5\x8e\x47\x59\x35\xda\x72\x6e\xfe\xfe\xc1\x0e\x93\x8a\x1d\x73\x6e\x03\xff\x29\x83\x37\xc8\xb3\x05\xa9\x0e\xb8\x08\xaf\xf0\x04\x9e\x18\x83\xf1\x1f\x68\x30\x8e\x2f\x2a\x8e\x10\xcf\xcc\x21\xdf\x7a\xe0\xa8\x5d\x51\x28\xa4\xdf\x1d\xed\x26\x76\xe9\x10\x20\x37\xaa\x29\xa0\xad\x0d\xaf\x72\xaa\xcb\x31\x79\x4a\x0e\x2e\x4e\x09\xab\x9a\x01\x6b\xff\x87\xc4\x8c\x73\x4e\x0e\x5d\xe3\xde\xe2\x5e\x2b\x96\xe9\x98\xe2\xc0\x6f\xd3\x06\x57\x8b\x86\x9c\x9e\x48\x4a\xcb\xac\x9e\x98\x0f\xff\xb1\x7d\x43\x76\x53\x13\x27\xc4\xc3\xb8\xb1\x02\xc1\xc2\xdb
\xb5\x71\x0e\x3a\xd9\x67\xa3\x2b\xa3\x80\x39\x39\x51\x4b\x86\xb3\xd7\x63\x7c\x8f\x6b\xb9\xe2\xc8\xb1\xf5\xc0\x08\xda\x23\xd5\x6c\x37\x51\x52\x6d\xc3\x08\x91\x2d\xe0\x0c\x48\xa6\x8f\xa2\xb1\x56\x5c\x19\xf3\x8c\x7c\x4c\x91\x4f\xaf\x8d\x54\xf4\xdd\x0f\x0b\x58\xb6\x50\x3f\x71\x69\xc5\x60\x45\x73\xce\x0d\xe0\xe9\xad\x82\xe7\x9e\x06\x43\x8c\xd6\xde\xf1\xc3\x74\x12\xb1\x79\x87\x05\x81\x0e\xbb\x84\x99\xca\xd3\xb5\xb0\x08\x2d\x10\x5e\xc0\xa8\xb7\xea\x42\xb1\xa7\x61\xb4\x84\xce\x2f\x39\x74\xa2\xff\xce\x14\xd5\xe0\xd7\x74\x35\x56\x47\x65\xd4\x6b\x9c\x54\x55\x8d\x57\xab\x8e\x63\x2b\x7d\xa8\x2d\x51\xeb\x3c\xd1\x38\xba\x38\x98\x51\xd2\xe0\x0b\xa2\x84\x10\x51\x56\xec\x4f\x57\x5c\x99\x54\x57\x2e\x2e\xf9\x63\x48\x96\x9c\x03\x87\x43\x3b\x2b\x7d\x72\xf4\x52\x25\x18\xfe\x7c\xc5\x89\x06\xaf\x6b\x14\x28\x2c\xad\xcf\x0b\xbd\x3e\x49\x93\x4f\x5f\xee\xbd\x50\xcc\x17\x4d\xb6\xa2\xf5\x51\xd0\x5e\xbd\xde\x7f\x7d\xfc\x7a\x39\x10\xda\x3c\xaa\x78\x19\xe8\xab\x3b\xe3\x62\x70\x13\xfd\xb5\x8b\xb4\xbe\xeb\xb7\xdc\xa5\xcd\x22\x10\x30\x8f\xf4\x8d\xc6\xf5\x5f\xe9\xb6\x21\x6e\xd6\xca\x25\xdd\x49\x42\xf0\x9e\x0e\xdd\xc8\x5b\x57\xe0\x42\x47\x21\x73\x0f\xb5\x73\xc5\x51\xd4\x68\x7f\x7c\xc4\x74\x52\x87\xdc\x0f\xf7\xca\xfe\x2a\x48\x4c\x81\xcd\xed\x18\xdc\x13\x7d\x2f\x7c\x1c\xb5\x5a\x1f\xae\xe6\x3f\xf7\xb2\xfc\x9e\xbe\x06\x91\x51\x52\xd3\x92\x86\x49\x7d\x9b\xe3\x4c\x6b\x4f\x16\x14\x5e\x47\xce\x21\xf9\x73\x08\x9b\x30\xf8\x4e\xe1\xac\x4d\x21\xb0\x42\xb6\x36\xeb\x07\x60\x6e\xcf\xc8\x71\xd4\xac\x2c\xde\xe3\x70\x14\xa9\xe9\xbe\x8e\xe8\x02\xe2\x9a\x0f\xf0\xca\x79\xec\xda\xc6\xc1\x4f\xc3\xb6\xfb\x1e\x91\xde\x5c\x1f\xf3\x39\xb9\x46\x2e\x0e\x76\x56\xa7\x9e\x53\x88\xf7\x8a\xb6\x1b\xe6\xbc\xef\xfa\x74\xd5\xb7\x57\x73\x9a\xde\x32\xe5\xc8\x1b\x57\xbe\xa6\x09\xdf\x9c\xdc\x44\x68\x24\xd0\x95\xe4\x58\x09\x33\xde\xa8\x24\x59\xbc\x4a\xb2\x2c\x6a\xef\x26\x25\xc5\xdb\x2e\x0e\xfb\xd2\x69\x0d\x91\x51\x61\xc8\xb7\x78\xda\x7d\xee\x9a\x41\xd2\xf7\x28\x6a\x27\xb8\x5b\xeb\x14\xd0\x93\x2e\xcc\x3d\xe9\x3c\x87\xad\x78\xd2\x25\x14\xbc\xef\x94
\xda\x0d\x48\x20\x5c\x60\x30\xcf\xe6\xa3\x5e\xf5\xfa\x43\xff\xf1\x68\x8d\xbd\x55\xfb\xee\xaf\xa0\xd0\xe1\x9d\x28\x74\x7b\x75\xd7\xd0\x8d\x7b\x15\xc7\x56\x49\x61\x05\xdb\xba\x07\x36\x5a\x9e\xe8\x36\x76\x95\x1c\xa8\x32\x2f\x23\xdc\xd0\xc4\x1c\xfb\x1d\x33\x4d\x14\xa4\x22\x0a\x3c\x27\x0a\x28\x89\x57\x75\xfb\x59\x83\xa3\x9e\x1a\xdd\x10\xc1\xef\x24\x05\x09\x19\xbc\x66\x15\xf4\xb9\xae\x45\xb9\x47\xcb\xac\x4d\xa6\x5d\xe9\xde\xae\xcd\xd9\xcc\x0e\xed\x18\xd8\x17\x5b\x82\xfb\xcd\xe6\xe0\x76\x35\xa6\x78\x65\x4c\x3d\x12\x11\x5e\x84\x31\x8b\xd6\x65\x0d\x16\xc6\xb2\xe2\x85\x68\xcf\xf1\xa1\xcd\x53\x52\xf5\x99\x23\x93\xf4\xbb\x55\x83\xba\x6b\x0b\x30\xa9\x88\xe1\x8d\x19\x07\xf3\x7d\xc1\xd3\xb4\xdd\x3c\x45\x6b\x20\xd6\x1b\xa4\x08\xce\xfc\x79\x93\x39\x4a\xca\xa5\x8c\x6a\x56\x27\xb9\x41\x8a\x1f\xa6\xcc\x8d\xfe\xa5\x0c\xcf\x67\xda\x1a\x67\xc2\xb5\xf1\x87\x20\x27\x09\x79\xa3\x44\x94\x3c\x8f\x4c\x6e\x79\x72\x20\x74\xd2\x19\x3d\xb9\x32\xd9\x4c\xca\xc9\x0b\xa1\x33\x39\x8b\x3b\x6c\x2f\x7e\xb2\x0f\xb6\x04\xf6\xd1\xce\x80\x7d\xb2\x39\xb0\x67\x36\x03\x17\xd5\x67\xe7\x7a\x49\x0f\xcc\x92\x5e\x24\x27\x5c\xa4\x61\x12\x97\x8b\xea\x66\x61\xe4\xbf\x42\xe3\x9e\xa5\x57\x5f\x52\x2e\x2a\xaf\x04\x8b\xbd\xcb\x6a\x46\x9d\x69\xb8\xdc\xcf\xb4\xd2\x71\xca\x71\xae\xda\xc4\xe5\x65\x94\x1b\xdf\xc8\x50\x4d\xf1\x20\xb7\xae\x89\x83\xf0\xc2\x82\x57\x62\xa9\xfd\xbb\x38\x48\x70\x51\x74\xa5\x28\x4b\x31\x4c\xdd\x99\xea\xf3\xad\x49\x6d\x7d\x1e\xdd\xc3\x1d\x27\xe7\x0e\xb4\x5b\xfc\xc7\x00\x62\x78\x19\x2d\xd3\x65\xa1\xe8\xb2\x36\x06\xdf\x8b\xd6\xdd\x80\x69\xfe\xff\x3c\x82\x82\xbf\x3e\x63\xf0\x45\xf1\xd7\x39\x82\x70\x8f\xec\x9f\x2e\xfa\xbd\x2c\x0c\xda\xa3\xb0\x1f\xb5\xc7\x15\x7c\xa6\xa3\x2a\x3d\x59\xbd\xa9\x7c\x62\xb0\xc5\x94\xec\xba\x70\xa4\x0e\xb9\xfb\xd8\x70\xf6\x1c\x24\xd5\x81\xae\xea\x58\xe1\x31\x32\x65\xde\xcc\xf6\xc0\x3b\xb5\x5f\x71\xe2\xd1\x8a\x34\x33\xd4\xe4\xc9\x83\xc4\x99\x92\xb3\x08\x8e\x11\x75\x0c\x4c\xaf\x0a\x33\x68\xb2\x51\x77\x37\x35\xdd\x7d\x52\xdd\x81\x37\xd0\xb7\x40\x1a\xe9\x1d\x70\x60\xab\x26\xa9\x79\xc3\x53
\xfb\xa5\x66\x0e\x0b\x01\xe0\x8b\x76\x51\xcd\x3d\x54\x6b\xfa\x03\xf4\x95\x76\xe6\xe4\x2a\x02\x13\x0b\x3d\x59\xa8\x21\x0e\xf5\xd0\xc2\xd5\xa1\x9d\xda\xef\x39\x51\xe4\x69\x68\x87\xe5\xa8\x3e\x57\x47\x35\xb0\xc3\x06\x04\xea\x9d\xda\x3f\x38\x09\x69\x3e\x3c\xdc\xcd\xd7\x98\x5e\xf7\x5a\x8b\x71\x57\xaa\x78\x59\xba\x0c\x9a\x3b\x7a\xc1\x49\x40\x97\xe7\xf9\x86\x93\xb7\xea\x6d\xa3\xdb\x7d\xde\xee\x6c\x99\x79\xdf\x6e\x76\xb3\x8f\x4b\x36\x6c\xb8\x24\xed\xed\x25\xa4\x5a\xd6\x12\x09\x40\x56\xa2\x59\xac\xda\x1e\x97\x83\xb9\x4a\x1a\xe9\xe7\x8c\xec\x19\x49\xa2\x20\x9c\x81\x86\x49\xcd\x71\xb8\x8f\xd5\x67\x66\x39\x4c\x06\x4d\x30\x89\xb7\x73\x05\x14\x6a\x32\xe8\x6a\x10\xd4\x9d\x78\x03\xdd\x89\x01\x41\x77\x75\x9f\x07\xb6\xdb\x0c\x74\x8f\x6d\x57\x1d\xeb\x7c\x38\xde\x59\x4b\x07\x67\xa6\x03\xb6\xc4\xa9\xa5\x1a\xd8\x4c\x5b\x03\x66\xe9\x6a\xeb\xa1\x9d\x36\x43\xd7\xa6\x9d\x3a\x37\xe4\xa2\xe8\xa2\x9b\xcf\xa1\xa1\x93\xae\xe9\xa4\x18\xc2\x0a\x48\xb8\x2b\x8c\xce\xef\xf5\x2a\xf3\x95\x2a\xf5\x10\x01\x5e\x5c\xaf\xb2\x59\xe7\x61\xbd\xeb\x4a\x95\x16\x29\xed\xc7\x32\xeb\xf6\x21\x5a\x9f\xf6\x71\x66\x73\xb8\xb5\x25\x64\x3a\x97\x57\x06\x3b\x9a\xe2\xec\x46\xcd\xee\x79\xee\xc3\x87\xc4\xb2\x34\xef\xa5\x95\x81\x6e\x9e\x88\xa1\xcf\x60\x37\x42\xed\x08\xb5\x4f\x17\x70\xda\x8a\x92\xaf\x65\x7e\x71\x38\xd2\x51\x7a\xed\x3c\x93\x83\x89\xa1\xfc\x6e\xf9\xe3\x52\xcc\x2b\x4d\x7d\xde\x90\xb5\x8c\x2e\x3c\x66\xd8\x86\x22\x45\xd4\x62\x01\x1f\xdb\x50\xb8\x0e\x58\xeb\x58\x26\xa7\x9e\x54\xf3\x92\x3a\x58\xe1\x03\x99\x47\x2a\x8c\x1d\xf5\xc8\x40\x3a\xef\x22\xbc\x61\xce\xaf\xe6\x2a\x19\x1a\xc5\x73\x89\xee\xeb\xb1\xaa\x13\x2f\xdd\xbd\x69\xdb\x0b\x1d\xa8\x18\x4e\x23\x1d\xb2\xa5\x9a\xfc\x80\xc2\x61\xeb\x1a\xb5\x84\x96\x7f\x10\x97\xfe\xf3\x26\x09\xd6\x72\x7a\x03\xd1\xcf\xf2\xa8\x91\xb4\x5c\x33\xbc\xd9\x78\x9e\xc7\x51\xe3\x7d\xf6\xf0\xe1\x72\x64\xb9\xa2\x4e\xec\xc8\xc5\x82\xc4\x64\x4a\x3e\x44\x70\xba\xe2\x1f\xf9\x52\xd3\xe8\x72\xa2\x7a\xc7\x1f\x3e\xd4\x91\xd3\xfa\x6c\x14\xf7\x5d\x3b\xa6\x0b\x25\x4b\x66\x14\xd4\x3a\x10\x4c\xf7\x7d\x4c\xfb
\xdd\x51\x37\xb1\x31\xee\xc0\xc7\x08\xba\x89\x89\x1a\xfc\xd0\xd2\xa8\x52\xed\xfe\x13\x60\x52\xcb\xd6\x27\xd1\xbd\x6c\xb2\x2f\x15\xe3\xc4\x14\xec\xde\xda\x1c\x0e\xed\x04\x8e\x6c\x06\xc7\xb6\x07\x5f\xec\x0c\x76\x35\x2c\xdf\x46\x7f\x21\x93\x9b\x24\x45\xa0\x61\x1e\x18\x55\x33\x72\xd7\x6b\xd4\x1d\x1f\x22\x10\xfd\x1d\x10\xfd\x0c\x44\xff\x16\x44\x7f\x06\xf9\x6d\x12\x26\x56\x8c\x96\x72\xcd\x17\x99\xe6\xdb\x33\xa8\xc7\xfd\x1d\xdc\xd1\x0c\xcd\xe8\x6f\xd1\x8a\x7e\xf6\xbc\x2a\x3a\xbf\xc2\x75\x3f\xa3\xe0\xe9\xad\x2a\x99\x4a\x3d\xdc\xe6\xe4\x94\x33\x72\x1b\xa1\x3f\xcd\x79\x02\xa7\x19\x1c\x84\x04\x1f\xaf\x18\xcc\x43\x62\x05\x2c\x4a\xb9\xa5\x35\xeb\xf0\x36\x5a\x13\xa5\xa3\x31\x41\x7e\xf1\x8d\xc3\x44\x91\x15\x3c\x95\x6b\xef\x93\x67\xe4\xad\x1a\x0d\x91\x8e\x68\xbc\xc4\xbc\xad\x04\x1c\xc5\x49\xaa\x65\x51\x8b\x22\x20\xee\xcf\x80\xe0\xe5\xe9\x19\x25\x68\xdf\x43\xbf\xa3\x9d\x49\xee\x4e\xfe\xa6\x32\x7f\x6f\x4d\xaa\xbc\xc6\x95\xd6\xee\x0a\x67\x06\x33\x68\x44\x80\xb9\x01\x4b\xe2\x70\x41\x62\xf0\xe8\x28\x1f\x59\x91\x8a\xa6\xd8\x86\xef\x0a\x09\x16\x01\x45\x17\x70\xb0\x66\x3f\x1a\x13\xc0\xb8\x06\xbd\xe6\xc9\x1c\x51\x64\x5d\x28\x29\x6f\x46\xae\xe0\x44\x7d\xf3\x85\xb8\xcf\x3f\x6f\x04\xc9\xe3\xc2\x53\x38\x28\x42\x47\x53\xfc\xa1\x23\x44\xe3\x7d\xcb\x6d\x04\x2b\x31\xa2\x73\x48\x99\x63\x3e\x53\xaa\xdb\x14\x91\xa6\xa9\x82\xb3\x4a\x34\x6c\xf3\xbb\x0c\x7d\x6d\x5e\xe8\x48\xd7\xe6\x87\xf1\x23\x30\xf2\x9f\x1a\x9c\x12\xe5\xc2\xf8\x42\x0d\xaf\x96\x34\x31\xf9\x57\x77\x4e\x9f\x34\x8d\xa0\x31\x93\xac\x93\x10\xcf\xa0\xfc\x02\x87\x9f\x6a\x1c\x5e\x87\x36\x13\x5e\x56\x50\x88\xf5\x9e\x12\x89\x50\x56\x6e\xaa\x12\x13\x67\x4a\xce\x39\x3a\x3e\x7c\xf7\xe1\xad\x05\xd7\x12\x65\x77\x04\xec\x37\xe5\xca\xa8\x99\xe1\xbb\x33\x7c\xcb\xfd\x50\x5f\x67\x9f\x88\xe5\x34\xb0\x71\x69\x44\x1a\x43\xe6\xc8\xb5\xa7\x4c\x00\x3a\x13\x65\xf9\x39\x0b\xd7\xf8\xf3\xe7\x37\x68\x82\x82\x7b\xad\x50\x70\x02\xc2\x25\xe3\x80\xc2\x24\x20\xa7\xd4\xa4\xac\x0c\x5b\x06\x6e\x60\x28\xe6\x33\x0b\x75\x1d\x02\xc2\x2a\x1c\xa9\xe7\xdc\xd7\x42\x83\x08\x42\x68\xc3
\xa0\x53\x72\xa2\xc0\x7d\x47\xc2\x83\xa1\xfa\xb7\xf2\x7f\x4c\x35\x65\x3e\x54\xf9\x78\x15\x5c\xf5\x6a\xaf\x0e\xf0\x0c\x4f\x3b\xbe\xd4\x42\x23\x85\x37\x91\x43\xb2\xc4\x41\x65\xd8\x15\x7c\xe2\x70\xa0\xe6\x71\x05\x2f\x13\xb4\x9c\x50\x8f\x57\x09\x1c\x98\xc7\x1f\x1c\x42\xfd\xf4\x9e\xc3\x99\x7e\x7a\xc1\xe1\xad\x29\x7e\xc5\xe1\x95\x79\xd4\x51\x17\x1b\x66\xa6\xf9\x98\x05\xfd\x4e\x9b\xcc\x26\x10\x25\x36\xb1\x53\x68\x19\x93\xa1\xb4\x79\x10\x39\x3f\x04\x7c\x8e\x4c\x64\xc7\x2f\xda\x76\x61\x6b\x01\x2f\xf4\xd3\xe6\x02\x7e\xe8\xa7\x27\x0b\xf8\x14\xdd\x27\xc9\xd5\x55\x8b\x89\xf2\x18\xb8\xf3\x92\xe9\xa8\xb7\x33\xf2\x29\xd2\x26\x4a\x6e\xa2\x73\xfb\x5a\x13\x91\xf8\x19\x36\xb2\x20\x48\x30\xf6\x9b\x92\x5a\x47\x8a\xa2\xdb\x8a\xfa\x7b\x4d\xad\x2e\x6a\xe9\x0a\xf2\x96\x49\x43\x88\x25\x6c\x9e\x47\x50\xaa\x76\x12\xd4\xc2\x45\xed\xe8\x4e\x8c\x2e\xe8\x5f\x8b\xf5\x14\x36\xc5\x7a\xaa\xd8\x6f\xd4\x7c\xac\x57\x12\x7f\x74\xae\x13\xec\x27\xd0\xb3\xb7\x96\x06\xee\xd7\x7c\x6e\xcc\xc0\x21\x73\x6e\x39\xc9\x97\xee\x52\xca\x89\xfd\xe8\x51\x94\x78\x2c\xba\x4c\x52\x69\x3f\x1b\x3c\xdb\x7a\x64\x55\x95\x1a\x11\x9c\x6b\x67\xf1\xb1\xf3\x53\xdf\xe5\x9d\xe2\xcd\x9c\x2c\x15\xde\x51\x02\xee\x89\x3d\xac\x80\x61\x77\x15\x4b\x4e\x57\x5f\xf9\xab\xaf\xa2\xd5\x57\xe9\xea\x2b\x77\xf5\x55\xb0\xfa\x2a\x5c\x7d\xd5\x80\xbd\x1b\x48\x31\x5b\x7d\x95\xad\xbe\x6a\x48\x77\xdb\xc0\xc6\x89\x35\x9c\x9d\x92\x68\x7d\x70\x9f\xd8\x21\xb8\xcf\xec\x00\x3c\xd7\xf6\xc0\xbb\xb6\x39\x78\xb1\x9d\x81\xf7\xc3\x96\xe0\xcd\xec\x14\xbc\xb9\xcd\xf0\x36\x14\xbc\x77\x76\x02\xde\xa9\x1d\x81\x77\x66\xc7\xe0\x75\xed\x29\x78\x03\x7b\x0c\xde\xd0\xee\x82\x7b\x64\xbb\x8b\xfa\xff\x96\xaf\x63\xf3\x9d\x7b\x30\x04\xf7\x00\x2f\xfd\xc8\x8c\x5c\x24\x18\xa6\x50\x3d\x5e\xaa\x47\x8f\x52\xc2\x29\x09\x29\x99\x25\x54\xf3\xcd\x24\xa3\x84\x51\x12\x26\xe5\x7f\x01\x25\x1e\x25\x92\x92\x9f\xde\x8d\x9d\x2c\x34\x97\xfc\x43\x38\xb7\xb1\x02\x91\xf7\xad\x08\xa1\x94\x44\xc4\x06\xb1\x6c\x6b\xe3\xac\x48\x77\x04\x5f\xd7\x59\xdd\x1a\x09\x4d\xda\xbb\x44\xc2\x2e\x29\xa5\xb3\x6f\x51\x3b\x43\x97
\x28\x22\x76\xea\x9a\xd8\x46\xb1\xef\x7c\x8b\x88\xe5\x45\x2c\x4d\x3f\xb0\x31\xb7\x28\x08\x3f\x77\xed\x72\xaf\x95\x94\x28\x7d\xe7\x5b\x4c\x2c\x3f\x9c\x5a\x14\xb8\xfe\x91\x4e\x58\x6c\x51\xc8\x7c\xe7\x6b\x0c\xcc\x77\x66\x44\xfa\x70\x0a\x88\xcd\xb9\x79\xca\x7c\x62\xed\x27\xcc\x0f\xe3\x8b\x7e\xbf\x6f\xd1\xef\x3a\x28\x8e\xe7\x3b\x42\x40\xe2\xb7\xc4\x96\x49\xbe\x4c\x26\x5c\xbc\x64\x29\x27\x74\x01\xa1\xff\x17\xae\xd6\xf4\xbd\x5a\xae\x84\x58\xbe\x8b\x60\x7e\x1d\x57\x7c\x64\xc5\x65\x5a\xa0\x27\xe5\x66\x52\x26\x6a\x5a\xae\x7f\xcf\x70\x4c\xa9\x6e\x18\x5a\x14\x22\xdf\x61\x02\x7c\xbf\x7d\xe1\x23\x1f\x62\x30\x3d\x08\xb4\x5a\x9b\xfa\x8e\x27\xa0\xeb\x37\x6c\x71\xec\xc4\x23\x2b\x60\xbd\x71\x18\x67\xa9\x65\xab\xc7\x49\x94\xa5\x56\x89\x88\x02\x5f\x2d\xf2\x09\xe2\xa2\xd8\x27\x96\x2b\xe3\x8e\x2b\xe3\x5e\x92\xc9\x28\x8c\x79\x2f\x8c\x83\xa4\xe3\x26\xc2\xe7\xa2\x37\xe8\x8c\x45\x6f\xd8\x19\xbb\xbd\x21\xb2\x0b\x53\x1f\xac\x31\x13\x17\x61\xdc\x8b\x78\x20\x2d\xb0\x7a\x5b\x82\x8f\xd5\x1e\xe9\x3d\x4c\xb1\x73\xd5\x6d\xc0\x50\xe7\x8e\x9f\x18\x8b\xde\x26\xd6\x39\x55\x5b\xaf\xd8\xb7\xc4\x98\x0b\x8e\x35\x18\xc9\x50\x46\x0a\x84\x2e\xf5\xba\x64\x91\x45\x61\xa2\x9f\x99\x45\xe1\xc2\xd7\xb6\x84\xad\x4b\x74\xcc\xf3\x60\x77\xad\x55\x7e\x04\xa6\x0a\x39\x24\x7f\xe6\xf8\xda\x02\x7c\x4a\xd5\xe3\x77\x34\x4d\xf2\xd7\x26\x10\x8a\xab\x19\x2b\x4c\x00\x11\xed\x07\x0d\xc6\x2a\xc5\xb9\xf0\x49\x86\x59\x4d\x46\xfa\x98\xb1\x7e\x77\x44\xd0\x27\xa4\x48\x3d\x17\x3b\x26\xc1\xc1\x4c\x12\x8c\x2d\x9a\x48\x18\x4b\x22\x37\xac\x8e\x3a\x29\x94\x62\xca\xe6\xc6\x3a\x58\x43\x34\xd5\x10\x14\xf3\x9b\x54\xbf\x5a\xfb\xda\x58\x92\x7c\x20\x3a\x83\x56\x86\x1f\xab\xf4\xa4\x3f\xb0\x61\x7a\x5b\x7a\xaf\xdf\x61\xe8\xf8\xc6\x13\x78\x48\xf2\xf8\x4e\xfd\x74\x12\x85\x92\x3c\xfa\x67\xba\xf1\xe8\x42\x89\x9b\x37\x66\x8f\x99\xb8\xe0\xd2\xa2\x70\xad\x37\x56\xfa\x16\x85\x1d\xf3\x7c\x69\x51\x38\x32\xcf\x8a\xef\x3c\xf6\xdb\xef\xdf\x63\x74\xfb\xe8\xb3\x21\xa5\xa3\x0a\x38\xef\xc8\xfb\xc0\x73\x2e\xa2\x34\x02\x6c\x7e\x6e\x3a\x35\x80\xb5\xf0\xea\x43\x81\xac\x9d\x7f
\xf0\xce\x2f\x59\x14\x4e\x18\x29\x46\xda\xf6\x3d\x75\x38\xdb\x3f\xb7\xa0\xf0\x5a\xaf\x4a\xa4\x70\xc5\xd9\xd2\xaa\x70\x28\x04\xa6\xf6\x0c\x11\xa2\x9a\x21\x42\x49\x15\xa3\xf3\x3c\x3f\x30\x66\x88\xb3\xcf\x31\x7e\x87\x11\x71\x2b\x39\x61\x50\x99\x23\xe0\x9c\x9c\x82\x96\x22\x28\x5c\xad\x3d\x1c\xf5\x94\x95\x71\xe1\x0d\xe5\x3f\xc7\x3b\x20\x5f\x7b\x25\xf4\x39\xcd\x53\x6b\xbd\x34\x3b\xce\x5c\x3c\xfd\xe7\xbe\x23\x19\xec\xf9\x6d\xee\x77\x10\x3b\xbb\x84\x70\x67\x46\xce\x7c\x13\x9b\x55\xc2\x27\x49\x2a\x09\x6f\x68\x35\x50\x7b\x2b\x2e\xf8\x54\xe0\x82\x98\x62\xb4\x76\x17\xf3\x76\x84\x4a\xc4\x41\xc7\x67\xb7\xc2\xb8\xbd\xf6\xf5\xce\x29\xdc\x37\x49\xd2\x50\x73\xcf\x28\x90\x84\x9e\xa5\x61\x8e\x45\xe1\x45\xdc\x0b\x25\x1f\xa7\x3d\x74\x34\xef\x44\x61\x2a\x7b\x3a\xa4\xbf\x7a\x5d\x02\xe0\x44\x21\x55\xb7\xb7\x5d\x82\xa0\x2c\x40\x62\xd6\x1b\x0e\xb0\x74\xb3\xe3\xf7\x82\x88\xdf\x74\x56\x3a\xce\x9b\xfd\x50\x22\x2b\x0c\xfe\x78\x81\x76\x8c\xef\xd5\x49\xf0\xfc\x26\x41\x44\x64\xe4\x67\xd7\x7e\x86\xb9\xdb\x50\x12\x3b\xf6\x75\xbc\x15\x5b\x41\x9a\x45\x81\x60\x90\xce\xa7\xb4\xdf\x1d\xe9\x37\xf6\x1b\xa6\x7d\x07\xbe\xe0\x37\x4e\x33\xb5\x36\x47\x7d\x97\x57\xaa\x20\xb5\x6e\x39\x06\x3e\x8b\x2f\xb8\xa8\x1c\x84\x36\xd8\xe7\x73\xde\x4b\x23\x96\x5e\x36\x1c\x80\x42\xcd\xa0\x48\x7f\x31\x84\xf8\xdf\x3d\x04\x97\x47\x51\xcb\x18\x0e\xb2\xe2\xfb\xcb\xaa\xd2\xe2\xf2\x66\xa4\xe5\x57\xc7\xda\xf8\x1c\x92\xcf\xd9\xaa\xc9\x7e\xe9\x1d\x1d\xa2\x0e\x7d\x60\x32\x8d\xb9\x8a\x4f\xd3\xe0\x56\x49\x71\x78\x6f\xbc\x72\xcb\x30\x86\x6c\xf3\x8c\xa2\x30\xbe\x5e\x99\xcb\x7e\x18\x5f\x6b\x84\x42\x30\x19\x18\x5c\x10\x81\xde\x72\x39\x16\x7d\x59\xf4\x82\x87\xb1\x53\x40\xe2\x10\xbb\xc1\x99\xbd\x65\xfa\x9c\xb0\xf7\xb4\x84\x1e\xc3\xc0\xe9\x88\xff\xe8\x81\xa2\xd1\xd6\xbe\xbf\x26\x1d\x71\x91\x27\x68\xd4\xee\xe5\xa0\x6d\x8c\xcb\xe5\x91\xd5\xe5\xd1\x27\xab\xca\x9c\xe4\xcb\xb1\x0c\x13\x92\xdf\xc8\x9e\x51\x51\x19\xf6\x25\x4b\xb9\xe8\xa5\x3c\xe2\x9e\x62\x5f\xc2\x38\x94\x21\x8b\x8a\xd2\xde\x38\xb9\xed\xdd\x51\x65\xc6\xdd\xeb\x50\xde\x51\xcb\x6c\x97\x97\x44
\x4a\x66\xb4\xfe\xeb\xb1\xeb\x0d\xfc\x82\xee\x64\x3e\x11\x1b\xff\x70\xac\x7f\x6c\xc4\x1b\xff\xb0\xfe\x81\x5b\x72\x17\x65\xd1\x04\xe5\x90\x91\x73\xa2\x55\xd7\x30\xf6\x89\xf5\x06\x41\xb0\xe3\xce\x3b\xf2\x32\x4c\x3b\x11\x73\x79\x54\xf9\x8a\xb5\x91\xf3\xcf\x0b\xe0\xd4\x6e\x58\x22\xf5\x99\x94\x7b\x49\xec\x33\x31\x5f\x5d\x51\xd5\xc7\x87\x44\x76\x70\xc1\xcd\x79\xf8\xae\x10\xb8\xf7\xeb\x17\x46\xb2\xc6\x48\x3a\xcc\x59\x8f\x7e\x86\x83\x1c\xff\xcc\x49\x57\xe1\x1f\x86\x86\x4d\xd2\xf9\x2a\x08\xc6\x9d\xc2\xab\x97\xd1\x99\x24\x52\xb1\x26\x78\xbc\x2c\x7b\xe9\x67\x6a\x81\x74\x6a\xe3\x1f\x47\x7a\xb4\x83\x62\xd9\x67\x97\xa1\xe4\xbd\x74\xc2\x3c\x6e\x81\x15\x27\x33\xc1\x26\x95\xa9\x48\x3d\xfc\x25\xa8\x3a\xad\x63\xe1\xb1\xdb\xdb\x32\x50\x9f\x48\x60\xb0\x4b\x12\x1d\x8b\x47\x8c\x66\xe4\xb2\xa8\x56\x62\x78\x33\x84\xfc\x9c\xcc\xc8\x9e\x0f\x98\x84\x35\x2b\xce\x89\x3e\x14\x1f\xfc\xf6\x8b\x06\xcc\xd1\xab\xce\xc9\x34\x81\xa9\x36\x00\xf5\xab\x96\x42\xfa\xf8\x68\x0f\xab\xd8\xe4\x0c\x8b\xab\xf7\x3c\xd5\x29\x4c\xa2\xde\x63\x33\xa0\x5d\xd9\x46\x07\x5d\xb2\xef\xe3\xa9\xc4\x64\x8a\x2e\x5e\x13\x85\x78\x57\xb3\x07\x31\x64\x28\xe5\xd1\xaa\xdd\xbf\x7b\x8b\x5c\x2e\xc3\x0a\x0e\x22\x31\xa6\x9a\x3c\x5f\xee\x70\x46\x3e\xf8\x18\xe0\x4a\x11\x59\x50\x58\x2f\x2b\x34\xab\x1f\x99\x02\x27\xbd\xa3\x1a\xa6\xd2\x4e\x90\x64\xb1\x8f\x66\xe9\x9e\xb8\x43\xfc\x7c\x1f\x18\xf1\xf3\x54\x49\x40\xc4\xf2\x2e\xb9\x77\x8d\x87\xfb\x9d\x91\xa8\xe2\x49\xa6\x78\xcc\x8f\x86\x6b\xd2\xc7\x01\x0e\xfd\xd2\x38\xd5\xf0\xa1\x50\x34\xfe\x4e\x51\xa3\x75\x62\xd8\xd4\xf9\x44\xf1\x22\xb7\x7e\xbb\xfc\x9c\xf3\x04\x6a\xa5\x63\x36\x45\xf2\x5c\x62\xa0\x8f\x25\x20\x49\x05\x9e\x0a\x46\xb5\xee\xbd\xe7\x25\xb1\x14\x49\x54\xfc\x54\x03\x70\x93\x9b\xb2\xed\x3b\xcd\xcc\xfa\x66\x66\x58\x86\x0c\xc6\x72\x07\xbd\x7c\x9a\xa7\x7e\x99\x9f\x92\x52\xf8\xc4\xf4\xa5\x8f\x04\x91\x51\x83\xfe\xab\xe7\x65\xa5\x17\x3f\xf4\x50\xa9\x75\x77\x5d\x9f\xa7\x9e\x08\x27\xc8\xfc\x94\xe7\x29\x36\xc8\x45\x83\xf7\x2b\x7f\xbd\x17\x67\xfb\xaa\x99\x5c\x4c\xd5\xef\xab\x2a\x48\xcb\x2a\x1c\x3b\x2f
\xf9\x6f\x83\x65\x99\x77\xad\xa0\x28\xf6\x2d\xb0\xa4\x60\x71\x3a\x61\x02\x95\xd5\x06\x1f\x04\x49\xac\xb1\xf3\x25\x17\x61\xf9\xda\xcb\x44\x8a\x78\x79\x92\x84\xb1\xd6\x74\xeb\x02\x83\x70\x11\x77\xc4\xdc\x2c\x7e\x3e\x14\x8d\x81\xf1\xf6\x0b\x07\xa3\x67\xfd\xd6\xbf\x67\x74\xe3\x37\x1a\x66\x0b\x0b\x36\x0a\x07\xbe\xf3\x0f\x1e\x4f\x9d\xaa\x5e\xf6\x1f\xf0\x59\x03\x62\xa8\x6a\x7c\xf1\x9d\xdf\xe1\x85\xef\x0c\xb7\xe0\x07\x0a\xc1\x52\xb3\xb6\xd7\x12\xba\x12\x9d\xe6\xe1\xd3\x3d\x54\x07\xc3\x42\x75\xf0\xbe\xe9\x24\xe8\x58\x53\xc6\x5c\xfb\xab\xaa\x91\x26\x60\x5d\xf3\xf9\xcb\xc4\xe7\x16\x60\xbc\x79\x3c\x9d\xc6\x59\xd1\x2f\x3c\x0b\xbb\x41\xd5\xbb\x30\x09\x4a\xff\xbf\x6f\x7e\xe1\xff\xe7\x09\x9d\x0a\x3b\x9e\x6a\x1d\xd0\x98\x45\xea\x50\x8a\x29\xce\x53\x7f\x9c\x82\x9c\xb6\xc6\x1c\xf3\xce\xb5\xe4\x7c\xad\xa3\xeb\x5c\x61\xb2\x57\xb6\x0f\x9e\xf3\x4d\x49\xdb\x90\x38\x27\x4c\x1f\x84\xcc\xc4\x53\x76\x7d\x0c\xad\xe5\x29\xb4\x1d\x3a\x3f\x7c\xc2\x29\x04\xce\x05\xf1\x70\xc9\x4c\x36\x96\x6f\x0c\x50\x16\x31\xd9\x6f\x2c\xcb\xf6\xfa\xdd\x91\x75\xc9\x52\xc3\x40\x5a\x36\xfe\x48\x33\xcf\xe3\x69\x55\x87\x52\x62\x5a\x91\xcc\x3a\x71\xd2\xbb\xc8\xa4\xe4\x22\x6d\xe1\xd7\x77\x35\x6b\xc8\x3c\xf5\xbd\x1a\xb5\xf1\x92\xa8\x63\x6d\x88\x42\xbb\x12\xc6\xbd\x59\xe8\xcb\x4b\x0b\xe4\xc8\xda\x1a\x0c\x26\x37\x96\x6d\x6d\xe2\xdf\x06\x89\xa1\xf1\xf3\xea\xcc\xf2\x58\xf6\x52\x29\xb8\xf4\x2e\x9b\xda\xa9\xaf\x22\x12\xe9\x99\x5b\xc2\x65\x0c\xf4\xd9\x6f\x4e\x7f\x8a\xc7\x21\x48\x44\x81\x17\x70\x1b\x31\x39\x98\x47\x6a\x6e\xc0\xea\x4c\xbf\xf0\x2b\x0e\xd3\x8d\xdb\x93\xa7\x79\xfb\xe2\x53\x93\x0d\xee\x81\xe3\x64\x79\xa3\xf7\x4c\xd5\x7b\x1e\x3b\xe7\x84\x41\x58\x71\x40\x34\x06\x2a\xdd\x51\xe6\xda\x33\xf2\xd6\x87\x07\xc8\x5b\xf7\x59\x41\x69\x32\x77\x41\x21\xf3\x48\x4b\x7e\xe6\x2f\x3e\x1d\xa9\xde\x87\xd4\xc6\x9a\xd2\x23\x5f\x59\x03\x1a\xac\x2c\x52\xcf\x95\x71\xb9\x50\xab\xcc\xda\x44\x84\x63\x26\xe6\x96\x3a\xe9\x24\xa0\x90\x34\x70\x61\x8a\xd1\x93\xa3\xfa\x4e\x78\x49\xd4\x63\x99\x4c\x3a\xb5\xaf\x29\xe2\xb1\xd9\xb4\x7d\x8d\x5b\x37\xb9\x8b\x7d
\x54\x72\x83\x67\xb0\xd7\x1d\xd2\x50\x2f\x59\x91\x21\x8c\x17\x4f\x65\x26\x25\x6f\x03\x33\x12\x4f\x8b\xbe\x14\x7c\x28\xce\x7b\x89\xfd\x2e\x17\xe3\x25\x52\x96\x8e\x01\x2a\xe8\xf0\xfe\x45\xdf\x6a\xe6\x7f\x91\x00\x20\x87\x3a\x36\xb0\xed\xb2\x94\x23\x86\x46\x5c\xfc\x95\x91\x03\x9f\x96\x7d\x1f\xf8\xe5\xe8\x4c\x92\x0d\x3e\xbd\xa7\x4e\x36\x9b\x36\x98\xae\xe6\xda\x26\x4a\x47\x45\x94\x3c\x6b\x45\x99\xaa\x86\xec\x8b\x64\xe2\x27\x33\x7d\xf8\xb5\xea\x13\x91\x12\x9f\x62\xc8\x83\xca\x20\x85\x21\x1c\x6c\xba\x8e\xc5\x28\xd9\x88\xe0\x49\xc7\x0f\xdd\xce\xd8\xdd\xec\x8c\x45\xa3\x66\xc0\xe3\x9a\x88\xad\x65\x23\x4e\xd5\x97\x15\x8b\x20\x1b\xc0\x7c\x52\x01\x34\xb5\x47\x38\x81\x82\xb0\x7b\xd3\x7b\x92\xb8\x64\xea\xfc\x0e\xe1\xd4\xd9\x1e\x40\x30\x55\x44\xcb\x9d\x3a\x5b\xbf\x43\x3a\xbd\x67\x5a\xf6\xdc\xf0\xa9\xc8\xcb\x7e\xa1\x53\xa2\x16\x58\xe4\x34\x23\x92\x56\xb3\xb3\x47\xd3\x95\xec\xec\x39\xd5\x60\xe7\x68\x3e\xe0\x46\xa8\xa1\x75\xc7\xc8\xbc\x7a\x53\x6d\x5b\xbb\x8f\x9e\x17\x93\x00\x34\x6d\xd0\x51\x41\xdb\xc8\x68\x3a\xcd\xf5\x45\xef\x24\x86\xcd\xcd\xd0\x76\x59\x8c\x12\x85\x7d\xf4\x0e\x4b\x7d\xc9\x3e\x09\xe0\x34\xfb\x2b\x3d\x72\xec\xad\x89\xb4\xdc\x81\x9f\xeb\xf9\x22\x5b\x70\xb3\x6c\xc3\xcd\xe1\xb4\x58\x55\xdf\x23\x61\x81\x89\xdd\xa5\xf7\x49\xf1\x3e\x28\xdf\x2b\x41\x68\x12\x40\x62\xb0\x39\x9f\x22\x36\xcf\x50\x3d\x9a\xaf\x46\xde\x2c\x99\x56\x10\xbb\xcc\x7b\x88\xbc\x02\xb1\xff\xf0\x09\x43\x87\x95\x12\xb9\x1b\x67\x64\xbd\xbc\xde\x54\x23\x77\x56\x41\xee\xc9\x7a\xe4\x9e\x4c\xe9\x48\x7d\x61\x48\xed\xc4\x20\x77\xd7\x43\x4d\x57\x8a\xdf\x55\xdc\x0a\x32\x87\x5e\x96\x5a\x90\x72\xb5\xa7\x14\xa6\x1e\x49\xb1\xd1\xbf\x91\x0c\x08\xc4\x02\x31\x6d\x26\x05\x0b\x0a\x7e\x3b\xd7\x63\xe0\xd7\x9b\x42\xe6\x3c\x40\x59\x5b\x42\xdc\xf7\xf6\xa8\x92\xbc\x3f\x05\xb9\x59\xab\xa7\xa4\x64\xc5\xbc\x64\x6b\x58\x17\x8c\xc4\xd5\x20\xe2\xa2\xb3\x13\x9b\x82\xf5\x3a\x46\xbd\x8f\xe6\xfa\xb5\x64\x86\x56\x42\x02\x3c\xaf\xea\x35\x27\x46\xa5\xd0\x2c\xe1\xef\x72\x42\xdd\x76\x4e\x88\x35\x70\x42\x75\xfe\x27\x9a\xa2\x28\x0c\xb8\x0c\x15\xed\x2f
\x70\x25\x43\xdc\x9f\x12\xed\x2b\xf1\xb0\x73\xcd\xe7\x9d\x20\x11\xc5\xa4\x73\x7d\x83\xd1\xf7\xff\x9b\xba\xfb\x97\xe8\x9c\xeb\x2d\x2b\xb9\xcb\x6f\x56\xde\x1a\xf6\x82\x70\x87\x64\x4e\x4c\x15\xc6\x8b\x9d\x4c\xe1\xbf\xea\x2a\x2b\x3e\xc3\x4b\xc6\x93\x88\x4b\xde\x1b\xf3\x38\xeb\x58\x1b\x84\x64\x7d\xb6\xf5\xeb\x57\xd6\x77\x5f\xd3\x87\x0f\xd5\xd1\xb3\xd2\xcb\x64\xa6\x68\x9d\x62\xa0\x3d\x92\xe0\xb9\xa1\x10\xe8\xc7\xca\x85\x86\x5c\xa5\x80\xaa\xd7\x52\x1d\x92\x4d\x15\x8e\xd3\x8a\xc1\x9c\x2e\xab\x95\x95\x0b\x0a\xd3\xa9\xf3\xb3\x6b\x3f\x59\x40\xf7\x0e\xe2\xdb\x48\x64\x4b\xc9\xd0\x6e\x2a\x5f\x27\xed\x9d\x30\x32\xf6\x88\xe8\x7b\x5f\x56\xf4\x41\x71\x4e\xa6\x2b\xb7\x94\x53\x93\x73\xeb\xb2\x81\x10\x6a\xa4\x1f\x2b\xb4\x5f\x26\x96\x6e\x4d\x02\x7a\x2d\xef\x48\x25\x1b\xf7\xbd\x2f\x8b\x42\xc8\xfb\x24\x75\xc8\x04\xb1\xac\x2b\x5e\x95\xa4\x15\xab\xa8\xb6\x16\x27\x1d\x0a\xb0\x24\x73\xdf\xc5\x3e\xbf\xc1\x7c\x44\x43\x9a\xaf\x45\xe5\x06\x43\xf0\x88\xe9\x05\x6c\x11\x7e\x97\xb7\x58\x2d\xd9\xd4\xdc\xe7\xa2\x62\x43\xf4\xb6\xd7\x2c\x71\x09\xa2\x87\xc6\xa6\xd0\xee\x58\x1b\xda\x5a\x0d\xad\x88\x44\x05\x0b\x4e\xa6\xed\x46\x4f\xee\xfe\xaf\x5f\xa2\xef\xbe\x1e\x29\xc0\x96\x8e\xa0\x0a\x31\x62\x4a\xb1\x3d\xd4\x3b\x6a\x1a\x8e\x16\x9c\xd1\x5f\x22\xe4\xd2\x10\x72\xaf\x20\xdb\xf2\xfe\xed\xa5\x6e\xbb\x6e\x2b\x70\x85\x3a\xab\xe7\x0d\x4f\x16\x9e\xaa\xcb\xf2\x54\x5d\xea\x53\xf5\x2f\xec\x52\xc9\x19\xd4\x13\xe1\x2a\x36\x80\x23\x7d\x9b\x4e\x91\x18\x5e\x78\x2b\x4c\x41\x85\x7a\x86\x53\x3a\x9a\x78\xc4\xa3\xb6\x61\x07\xd4\x2f\xa6\x7f\x05\x53\x3a\x1a\x7b\x5a\xd6\xe6\xd0\x16\x2e\x52\xc3\x70\x46\x29\xb5\x5f\xbb\x8b\xc2\x06\xa0\x04\x17\x13\x2a\xc0\x02\xcb\x8d\x12\xef\xba\x54\xdf\x1a\x7c\x3f\x1c\x0c\xfe\xaf\x52\x29\xd5\x82\x62\x3a\x4b\xbf\x7a\x22\xbc\xb8\x94\x25\xda\xe9\xa2\x58\x2a\x35\xbe\xb1\x67\xe4\x72\x8a\x5e\x08\xee\x0b\x5a\x5e\xdc\x03\x03\x6f\x41\xe1\x62\x0d\x0d\x7e\xa7\x79\x48\xa6\x6f\xf9\xbb\xfa\x9a\x7f\x47\x7b\x12\x9f\x00\xfa\x47\xbd\xd1\x91\xf6\x5e\xe3\x8d\x85\xf7\x63\xbd\x72\xd9\x63\xc2\xef\x54\xc9\x6f\xbd\xb0\x77\xc9
\x99\x5f\x65\xe6\x2f\xab\x00\xd6\x51\x40\x26\x99\x9b\x76\x2a\x75\xf1\x45\xde\xe0\x86\xbc\xf2\x61\x00\x9e\x46\x21\xfb\x0c\x93\x57\x9a\x93\xf8\xc6\xd8\x5b\x2a\x32\x81\xf5\x86\x8d\xf5\x44\x9f\x5d\x8e\x2c\xb4\x14\xee\x10\xcd\x11\x50\xcb\xd6\x2f\x72\x71\xcf\xf3\x97\xb2\x63\xce\xc8\x64\xaa\xf5\xc8\x8c\x52\x98\x93\x5b\x1f\x72\x81\xd1\xb7\x40\xf4\xbf\xc0\x29\xcb\xdf\x97\x97\x7b\x20\xfa\x47\xf0\xa1\x28\xd0\x37\x6e\x20\xfa\xc7\xb0\xcb\x72\x72\xb6\xb2\x3e\x06\x62\xf4\x6c\xbd\x51\x39\x14\x9e\x0f\xc5\x9f\xa2\x52\x9b\x5d\xe2\xc6\xe7\xc5\x32\x2f\x96\x53\xf8\xe9\x9d\xdb\x0f\x06\x08\xa2\xd5\xcf\x9c\xb6\xf1\x74\xb9\x20\x5d\x5e\x94\xac\x18\xc8\x6c\x69\xaa\x62\xee\x3b\x36\x81\xd9\x0f\xe2\x85\x21\x93\xf1\x68\x9d\xa5\xc1\x56\x55\xca\x7e\x99\x44\x11\x9b\xa4\xbc\xc3\xa2\xc8\x28\xc8\x2d\xfa\xdd\x5e\x63\x39\xb0\xd4\x5c\x5b\x69\x2e\x37\xce\x27\x18\xfa\x30\x27\xbb\x3e\x24\x10\x2a\xe6\x49\x1a\xdc\x3b\xd3\x5a\xb9\xcb\xa1\x45\x61\x3e\x5d\xb2\xc8\x9a\x4d\x4b\x8b\xac\x38\x91\xb9\xa6\xde\xf4\x78\xa3\x1b\x22\x92\x49\xb5\x65\x87\xd6\xe8\x5d\xca\x71\xf4\x26\x51\x64\x76\xe7\x9e\x22\xee\x5d\x9a\x72\xc1\xfc\x30\xf9\x97\xd4\xe4\x02\xb1\x60\x8c\xe8\x4f\xfe\x27\x55\xe3\x0b\x0a\x47\x53\x63\xf9\x7b\x3c\x35\xc1\x2b\x5f\xe3\xc3\x70\xb0\x80\x33\x7c\xfa\x7d\x01\x57\xf8\xb0\xbd\x80\x97\xd3\x56\x9f\xe2\x8a\xd0\x3c\xf8\xc3\xc1\x58\xe6\x3a\x8e\xb9\x92\x5f\xcc\xb2\xbe\x93\x24\xd3\x28\xce\x73\x62\x48\x1c\x01\xa1\x23\x21\x70\x38\xb8\x8e\xb9\xd0\xca\x28\xa4\x8e\x09\x57\x2c\x30\xc7\xf2\xf3\xd8\xf1\x40\x38\x09\x48\x27\x04\xee\x04\x90\x39\x2e\x30\x27\x55\x12\xf6\xf9\x74\xad\x9b\x59\x27\x20\x2f\xa7\xe8\x78\xf2\x4d\x62\x76\x22\xf4\x96\x3f\x85\x39\xf9\x9a\x62\xf0\x43\x1d\xaa\x71\x6f\xda\x66\x90\x67\x32\xb3\xd5\x52\x6c\x0e\xb7\xb7\x07\xb9\x75\x70\x25\x32\x40\x2d\xd8\xc0\x76\x3d\xbd\x59\x2d\xc2\xc4\x93\x9a\xeb\xe4\xa0\x9e\xd4\xac\x6e\xdc\xb7\x89\x79\x45\xda\xe8\xc1\x04\x23\xaf\x73\x67\x4a\xce\xa7\x30\x1c\x02\x46\xee\x95\x40\x32\xa7\xc1\xcd\x5d\xd4\x33\x73\xc6\xb5\x30\x07\x68\xbe\x39\x7a\x6a\xc7\xbd\x95\x61\x3c\x31\x45\x4f\xf1\xef
\xd0\x8e\x37\x86\x8b\x05\xd9\x51\x84\x77\x6f\x9a\x07\x37\x2e\x22\x1f\x3f\xed\x65\xc5\xbb\xcc\x29\x23\x20\xc7\x14\x62\x87\xc4\xf7\x1a\x9c\xfe\xd2\xc0\x6e\x1e\xe4\xa0\x7d\x90\x83\xf6\x41\x66\x3a\x01\x48\x11\xa0\x39\xd6\x2f\x61\x69\xf9\xe2\xaa\xfa\x43\x2e\x2f\x6d\x06\x92\xa2\x7d\xea\x2e\xc9\xe1\xe8\x43\x2b\xa7\x88\x01\x56\xfe\x78\x3a\x12\xf6\x8c\x7c\x98\x2a\x31\xcb\xc5\x84\x7f\x30\x23\x1f\x13\x10\x1a\x73\xf8\xfa\x9d\xd1\x0d\xee\xe2\x99\xdb\x5a\xc0\xe9\xf4\x4e\x6b\xd9\x5f\xbf\x8c\x7b\xa0\x71\xda\x59\x8a\x53\xbb\xa0\xf0\x6e\xa9\x0b\x89\xe9\x90\x8c\x35\xc3\x05\x39\xf2\x14\xe0\x1c\x79\xea\xa0\x8e\x2c\xbc\xaa\x1f\x27\xb1\xbc\x54\x02\x16\x64\xed\xf7\x37\x45\xfc\xe3\xb8\x08\xc9\x2b\xe8\xda\xb0\xd0\x0f\xe2\x7e\xf7\xe1\x43\x35\xd4\x19\xc9\xd0\xe8\x84\xd3\x91\xb0\x2d\x6b\xa1\x19\x5d\x1c\xf0\x15\x58\x1d\xae\x30\x39\x2a\xc1\xf0\x95\x00\xab\x33\x4e\xb2\x94\x27\xe8\xfd\x83\xba\x2c\x2c\xb8\x05\xab\x63\xe4\xf7\xe5\x9c\xca\x65\x38\x39\x76\x8b\x9d\xb6\x85\x92\xb3\xac\xe2\x0e\xd9\x45\x8b\xb4\x73\x62\xc6\x86\xa3\x03\xf5\x43\x60\xa0\x79\x0c\xa9\xcd\x7e\xfd\xda\xc4\xb0\xc0\x6e\xd9\xde\x3c\x74\x5c\x2e\x67\x9c\xc7\xd6\x82\xd0\x9c\xf7\x3f\x27\x98\x65\x42\xf1\x10\xa7\x53\x68\xcd\x34\x17\x02\xfa\xdd\x97\x83\x6d\x54\xb9\xf9\x4c\xf2\x8e\xcb\xbc\x6b\x6b\x83\xb0\x3e\x53\xff\xb8\x1b\x31\x6d\x94\x76\x55\xd5\x40\x24\xb1\xb4\x36\x92\x0d\x12\x6e\x10\x6f\xc3\xc4\xbd\xf3\x7d\xb0\xca\xb5\x84\x14\x1d\xec\xf3\x14\xd0\x8a\x49\xd8\x9d\x96\x74\xe2\x4c\x81\xd2\x67\x8c\xec\xce\x69\x45\x24\xfa\xb8\x96\x74\x2e\xb1\x96\xa7\xb9\xdd\xc1\xbb\x69\x9e\x15\xcd\x74\x72\x38\x6d\x75\x92\xdd\x47\x51\x06\xc3\x83\xa9\x9f\x1f\xa6\x20\xe1\x94\xb6\x5d\x71\x5d\x65\xa9\x0c\x83\x79\x71\xb5\x54\xd7\x38\x57\x6c\xd8\x38\xbf\xae\xb0\xb0\xab\x8e\x19\xf8\xcd\x9f\xf5\x5c\x7a\x7b\x1c\x8e\x3d\xbc\x95\x3c\xca\x94\xd8\x73\x80\xc2\xcf\x71\xa6\xe4\x9c\xaf\x8a\x2b\xb4\x8e\x2f\x33\x0b\xac\x37\x22\xb4\xc0\x3a\x62\xd2\xfa\xbe\x12\x0e\xa8\xda\xcb\xdd\xed\xd1\x4b\x05\x7b\xa9\xe1\xb5\xa5\xc1\x14\x15\xd7\x0f\x49\x2d\xf7\x82\xd4\xd8\x55\x05\x22\xb8\x62\x2c
\x8c\xcd\x1d\x73\x09\x1a\x99\x44\xc8\xb8\x9a\xd2\x8a\xe1\xc8\xc7\xfa\x06\x9e\x20\x86\x1a\x2c\xe0\x76\x6a\x02\x9e\xbe\xd2\x5c\x97\xab\xfa\x7b\xab\x9f\x27\x16\x85\x37\xed\xe0\xf2\xb3\x6b\x3f\xa9\x66\x12\xc7\xa8\x23\x6b\xab\x3f\xae\x57\xff\x6c\x6e\x6e\xa5\x48\xe2\x0b\x8b\xae\xc9\x3d\xbe\x12\xf4\xc5\xa4\xaf\x8b\xfb\xdd\xd1\xdb\xc4\x4e\x39\xd5\xee\x8e\x0a\x3f\xf3\x22\xe1\x63\xbb\xe1\x47\x9e\xa2\xaa\x92\x9d\x8a\x58\x5e\x11\xf7\xd0\x4b\xe2\x29\x17\xb2\x93\x4a\x11\xea\x91\x4d\xf5\xfd\xf6\x7b\xc3\xf5\xcf\x39\x78\x82\x52\x78\x31\x6d\x73\x14\xae\x83\xba\x0c\xc7\x7c\x12\x7a\xd7\x55\xf0\xfe\x5c\xe8\x00\xd3\xcc\xbd\xe2\x9e\xac\xda\x14\x8d\xac\xd7\xb1\x6f\xd9\xd6\x51\xae\xe5\x5c\x86\x82\xcb\x24\x13\x4d\xfa\xe2\x6c\xd2\xd3\x8e\x03\xe6\x46\xa7\x38\x53\x0a\x43\xcc\xc9\x1b\x75\x20\x07\xb0\xc6\x74\x98\xc5\x17\x11\xef\x99\xcb\x81\x53\xf3\xdd\x77\xc6\x3a\x42\x01\x9a\x1b\x65\xc2\xd2\x0b\x3d\x23\x07\xd8\x9f\x5a\x20\x8a\xac\x70\xe5\x60\x86\x01\x91\x39\x3e\x17\x7d\x76\x5d\x8d\xf3\x6a\x0d\xd0\x0e\x2b\x4f\x5b\xa1\xdd\x4b\xb0\xda\xce\x9d\xd5\xd4\x79\x38\xf3\xc8\xa6\xe6\xcd\xa7\x21\x9f\xd5\xa7\x9a\xf3\xd6\x95\x6b\x34\x25\xcf\xdf\x63\x5d\x7a\x77\x2e\x8c\xea\xa9\x5c\x9a\x15\x51\x32\x89\x56\xbf\x50\xea\xac\xec\xd5\x7d\x54\xf2\x9a\xe4\x7f\x77\x27\x87\xff\xe6\x9d\x1c\xfe\xfd\x9d\xfc\x71\xbf\x9d\xfc\xb1\x76\x27\xff\xfe\xde\x0d\xff\xe5\xbd\x53\x27\xb4\x05\x9a\xf4\xf6\x91\xb8\x1d\x3d\x69\x6f\xd9\x36\xcd\x95\x42\x37\x96\x65\xcf\xc8\x3c\x81\xdf\x41\x22\xe3\xb5\x30\x48\x4a\x8e\x0c\xcf\x77\xad\xfe\xb9\xc2\xbb\x01\xf5\xb4\xa3\xfe\xb9\x2d\x68\x37\x32\x3b\xf1\x82\xc2\x8f\x69\x53\xfa\x00\xe3\xb4\x55\x9a\x45\x34\x98\x63\x8e\x3b\xee\x45\x2f\x60\x3e\xf7\x57\xcd\x33\x37\xd1\x3e\xbc\xc5\x9c\x55\xf2\x1b\xb9\xce\x96\x75\xb9\xbc\xd1\x90\x55\x57\x59\xbe\xac\x7e\x1e\x2f\xc7\x1d\x0f\xfe\xe6\x90\x4f\xaa\x86\xcb\xe5\xbc\x1b\x6f\x53\x44\xc9\x0a\x7d\x9a\xb6\x26\xef\x6d\x33\xb9\xd6\x21\xa7\x1c\x1d\x72\x0a\x23\xb7\xba\x68\x03\xbb\x4b\x88\xbc\xc3\x7f\x20\x37\x84\x44\x57\x00\xd9\x77\x57\x39\xc5\xf5\xbe\x00\x7f\xcf\xd2\xbf\x62\xd7
\x2f\x8c\x5d\xbf\x30\x76\xfd\xc7\x3e\x32\x05\x85\xbd\x3e\x3a\x76\xd4\xec\xf5\x95\x14\xa2\xed\xba\x97\xbd\x63\xee\x6f\xd7\x2d\xda\xed\xba\x7f\x4c\xd1\xb9\x86\xe7\xfb\xf1\x7e\xda\x7e\xbc\x2a\x36\xa8\x6c\xa0\x3f\x57\x7e\xe9\x13\x6a\x60\xb4\x05\xe7\xd7\xe9\x7d\x3c\xf4\x56\x12\xea\xac\x2c\xa2\x59\x38\xd9\x77\x47\x7f\xce\xc8\xdb\x69\xce\x17\x7f\xae\xe8\xae\x76\xf2\x88\xb4\xfa\x2a\xce\xee\xa0\xab\xe4\x57\x81\xc9\x64\x0a\x04\xf3\x7e\x8a\x4e\x31\xa8\x6c\x6b\xee\xe7\x43\x52\x06\xb7\xad\x5c\x12\x9a\x73\xbf\xcc\x8c\xea\x75\xac\xe7\x0c\x2f\xbd\x09\x5b\x67\xa5\x6d\x63\xf1\xdf\xde\x8c\x89\x18\xf9\x9a\x65\x0b\x66\xc5\x92\x7d\x9b\xb6\x07\x6e\x6c\xf0\x71\xac\x0e\xa7\xc2\x30\x7b\x3e\xbc\xf4\x60\x46\xbe\xea\xb9\xd7\xb3\x25\x19\x5f\xc7\x3a\x87\xcc\x7c\x35\x82\xb8\xeb\x58\x63\xd9\x7b\x6c\x81\xe8\xae\x18\x61\xac\x4e\x6c\x97\xc4\x5d\x30\x3e\x42\x23\xab\xb3\x74\x55\x6d\xd1\x9a\xf9\xec\xf5\xb4\xd9\x00\xad\xc2\x90\x15\xbb\x72\x60\x12\x39\x76\x34\xcd\xc6\x0d\xc7\xcd\x29\x37\x2b\x37\x8f\x4f\x2b\x36\x4c\x5a\x89\xfd\x22\xab\x68\x84\x87\x88\xea\x61\x99\xa0\x6a\x61\xb4\x40\xd8\x66\x21\x0b\x1a\xdb\xb4\x85\x55\x2b\x8c\x5e\xc0\xb9\x8f\x22\x65\x4d\x37\xb8\x20\xe6\x38\xc9\xae\x61\xed\x79\xd7\x59\xab\x7c\x36\x77\xf9\x88\x47\x4f\xdc\x0a\xb7\xf2\x49\x68\x7a\xb8\x53\xc2\x24\x64\xdd\xfb\x1c\x52\x85\x9f\x3a\xd6\x46\xdc\x35\xbd\xf1\x6e\xcd\xd9\x05\x59\x02\xf4\x7d\xe7\x96\x6d\x7d\xc1\x2c\x9d\x8d\xf6\x4f\xe3\xa8\xb7\xd9\x59\xb5\x81\x38\x61\x44\x76\x57\x66\x1e\x2f\x33\x0e\x4b\x6d\x0d\x50\x68\x6d\xfa\xd2\x35\x22\x2a\x99\x0b\xaa\xc0\x70\xd9\x9e\x2d\xc0\xeb\x1a\x2d\x4e\xd2\xbd\x23\x1d\x0c\xeb\x33\x9e\x87\xf5\xf3\x2a\x11\xd9\x9b\x34\x01\xda\xca\x4a\xf3\x17\x49\x94\x8d\x75\xb2\x0a\x63\xf0\x5f\x31\xad\xe8\x58\x1b\xb2\x0a\xbc\xab\x68\x43\x94\x28\x06\xef\x96\xc5\x94\x30\x9d\x5b\xc0\x33\xd7\x72\xd9\xaa\xf1\x4e\x67\x09\x86\x8a\xad\xcf\x59\xa2\x95\x83\x7c\xdf\x91\xff\xcf\x8c\xb6\x32\xb0\x1c\x55\x68\x1f\x1f\xaf\xf9\x80\xdc\xbd\xba\xc6\x83\xeb\x3f\xb1\xb8\x39\xc0\xad\x5e\x37\xde\x75\x8e\x93\x1c\x1a\x17\x14\xc2\xee\x1d\xfa\x9a
\x8a\x88\x59\x39\x70\x01\x49\xba\xf0\xce\x07\x23\x3d\x82\x12\x4c\x7a\xd1\x45\x6f\x1b\x8d\x42\x9f\x98\x30\x3d\xfb\x1e\xdc\x78\x14\x6e\x3c\xe2\x75\x29\xc4\x14\x8a\x66\xaf\x4c\x2a\x95\xb2\xe5\xd6\x52\xcb\xf3\xa5\x96\xb2\xd2\xf2\x35\x1a\xcd\x37\x7e\x6e\x6f\xa9\x91\x68\x58\x14\xbd\x51\xa6\xf1\xd0\x34\x5e\xd9\x29\x75\x6c\xff\x9f\xff\xdb\x5a\x71\x6f\x5a\xda\x8b\x3a\x9a\x33\xb8\xa3\xa3\x4d\x0a\x7a\x4b\x36\x1b\x37\x1e\x61\xdd\x76\xe1\xc1\x63\x11\x57\x5c\xe7\x92\xe8\xf0\x9d\xde\xe5\x5a\x12\x0a\x78\xf4\xdf\x24\x89\x7f\xa9\x71\xed\x60\x8d\x2e\x7d\x14\xf6\x25\x4f\x25\x89\x9d\x98\x8e\x2c\x9f\x49\xd6\xb3\x36\x62\x3b\x86\x47\xff\xfd\xcf\xf4\x37\x72\xc5\xa6\x4c\x5f\xfb\xd8\xbf\x54\xa1\xad\x98\xd8\x7f\x3e\xba\x94\xe3\xa8\x68\x2a\x1c\x81\x86\x59\x18\x1f\x3d\x50\xc8\xdd\x13\xa0\xbb\xba\x10\x6c\x3c\x3e\xe7\x7e\x88\x21\x45\xf2\xf0\x5c\xe0\x76\xdb\x7c\xab\x73\xa7\x6a\xf1\xcf\xf8\xd7\x3f\xc5\xaf\x7f\xc6\xda\xb7\x3a\xed\x6a\x0f\x5a\x7e\x23\x99\xe0\xcc\xa2\x10\x75\xd7\xe6\xf8\x9a\x93\x9d\x14\xb6\x60\xf8\x18\xbe\x0a\xe2\x76\x75\xc6\x58\x34\x82\xc8\xfe\x67\x90\xa3\x58\x7f\x7e\xe3\xf2\xfc\xa6\x5d\x73\x80\xb3\xfc\x00\x4b\x3c\xc0\xfc\xde\xd8\x11\x3e\x78\x84\xa9\xa5\xfe\x37\xa0\xc9\xff\x91\x71\x37\x0d\xf1\xdf\x8d\x30\xff\x03\x0b\x5e\x90\xe8\xa5\xc9\xfc\x6d\xd4\xe9\x77\x1b\xfd\x86\x74\xb0\x5d\xde\x67\x8f\x01\x73\xd3\x75\xc1\x53\x7f\x06\x80\xae\xa3\x1f\x20\x54\xbf\xf6\x30\x63\x1d\x7b\x85\xf1\xc2\xba\xa3\x73\x62\x7d\xe0\xb3\x3c\xe3\x84\x42\x67\xaf\xd1\x1d\x0a\xc3\x01\x5b\xaf\xfd\x50\x96\x65\x6f\x39\x31\xd1\x09\xab\xd1\xdf\x5a\xee\xbf\x73\x01\x6b\x4e\xc2\x2e\x64\x7d\xf7\x33\x64\x7d\x76\x0a\x59\x3f\x55\xf3\x16\x5d\xf0\x80\x55\x90\x6b\x11\x2d\x28\xee\x1a\x3d\x64\x81\x52\x8f\xa7\x98\x0a\xf5\x84\xc2\x94\x44\x5d\xb0\x5e\xe6\xb9\xcc\xf3\xaa\xb3\xa2\xea\x91\xae\x7a\xbc\xb4\xb4\x05\xe9\x98\x93\x6f\x53\xe0\x90\xe8\x08\x57\x59\x17\xa5\x44\x35\xc6\x2b\x45\xf4\x34\xda\x9b\x76\x1d\x2e\xa0\xdb\x75\x32\x01\xe3\x46\xe4\x12\x0b\x12\x08\x54\x73\x98\xb8\x1b\x97\xdd\x7b\x1a\x7b\x4f\x5a\x2b\x7a\x0c\xc5\xc8\x1d\xc1\x3b\xf3
\x24\xeb\xa4\x99\x79\x98\xb1\x58\x76\x64\xd2\xd1\xa9\xe8\x97\x58\xf2\x91\x45\x81\x6d\xdb\xeb\x0d\x6a\x4f\x18\x39\xe0\x64\x46\x2e\xf5\x5c\xab\x76\x90\x2f\x93\x38\x08\xc5\x58\xd3\x1a\xf7\x87\x7d\xc0\xc9\x71\x42\xc1\x7b\x62\x5b\xaf\xf5\xd7\xf2\x7d\xc7\x64\x3d\x6b\x98\xe3\x26\xbd\x84\x31\xca\x4c\x79\x14\x18\x6d\x53\xe9\x1e\x61\x8e\xd5\x06\x1a\xcd\x9f\xfb\x26\x60\x21\x5a\x56\xb4\x7e\x05\xdd\x30\x36\x88\xf5\xd0\x04\x57\xd3\x2e\xe4\x5a\x30\x9e\x6b\x94\x7e\xf9\xd8\xa2\x70\xb3\x66\x9c\xff\x3b\xe3\x12\xac\xde\xf7\x98\x74\x19\xab\xf4\xe4\xa2\x8b\x4c\x48\x6a\x81\xe8\xb3\xd3\xba\xe8\xa9\xcb\x91\x2b\xc2\x1a\xee\xe7\x86\x8b\x1a\xec\x02\xb7\xd7\x37\xbd\x60\x1c\x2f\x54\x91\xb8\x3e\xfc\x45\xa7\xfa\xe5\x78\xfd\xb2\x02\x5d\x27\x46\xe3\x49\x29\x9c\x7a\x44\xd0\xc6\xb0\xf6\x4b\xf3\x5c\xaf\x07\x58\x27\xe6\xad\xc4\x2e\x30\x60\xbf\x8b\xae\x3b\x95\x41\xe9\x89\x5b\xb4\xf1\x0a\xeb\xdf\xd0\xff\x2b\x1e\x71\xad\xf0\x56\xab\x2a\x56\xd4\xb0\xa5\x5e\xe7\xa3\xa7\x36\xe8\x9a\x52\x38\xf4\x48\x3c\xc2\xdb\xf1\x49\x17\x04\x3c\x18\x52\x6a\xef\xc8\x42\x2d\x2a\x17\x14\xae\xbb\x0d\x91\x11\xcd\x1a\xca\x9a\xea\x87\x57\x82\x37\x62\xe0\x0f\x13\xbe\xb5\xcd\xec\x90\xcd\x17\xda\x0e\x77\xad\x81\xad\x7b\xb2\x30\x86\xb5\x78\xe5\x4a\x29\x06\x22\x18\x77\x57\x3d\xbb\xcb\x29\x36\x74\x74\xae\xb6\x5c\x01\xd9\x4d\x37\x8f\xa7\xa0\x5e\x50\x34\x76\x5e\x60\x68\x1d\xe3\xee\x9c\x43\x55\x6e\x40\xb5\xaa\x65\x91\xad\x5a\x96\x9d\x6e\x35\x61\xcb\xd1\x1a\x29\x03\x2d\xfa\x64\x1e\x03\xf6\xc4\x83\x03\x5e\xd7\x65\x88\xe2\x72\xa0\x12\x11\x33\xf3\xc9\x3b\x46\x6e\x3d\xd4\x86\x2d\x19\x3a\xb9\xcc\xbf\xe0\x1d\xfc\xb7\x37\x09\xa3\x28\x99\x99\x1f\x66\xa4\x06\x0d\x20\x9e\x94\xc9\x64\xc9\x5d\x4d\x5f\x6d\xc7\x86\x05\x5f\xb4\x7c\xee\x3b\xde\x2d\x2c\x28\x1c\xaf\xaa\x90\xc2\x80\xe8\x4c\x20\x15\x65\x79\x8b\x31\x64\x81\xb8\xcc\x3d\xea\x91\x02\xbe\x81\x5a\xbc\x55\xe5\xf5\xbd\xba\x68\x8c\xe2\x81\xbd\xc6\x7d\xef\x0a\x14\x0c\x15\xf7\x02\xaf\xff\x37\xd3\xc1\xcf\x68\x7c\x80\x17\xba\x7d\xd7\x5f\xb4\x52\xc4\xb3\xae\x31\x3f\xbb\xd2\xe4\xc7\xb5\x28\xbc\xbc\x97\x0a\xa9
\xc2\x87\x8a\x64\xd6\xe4\x07\xaf\xa4\xc3\x4d\x94\x11\xb1\xee\x52\x8c\x8a\x19\xb9\xea\x36\xf1\xa6\x4b\x37\x75\xbd\xe1\xe0\x6e\xb7\x37\x51\xc8\x7a\xe7\xcd\xfc\xa4\x68\x0d\x52\xf1\xb9\x48\x81\x60\x62\xe4\x7c\xd5\xf7\xf0\xf4\x0e\xa6\xb0\xf4\x89\x84\x5a\x58\x8a\xc2\xbc\xf7\x14\x99\xea\xff\x34\xb9\xf8\xcc\xc9\x59\xf7\x7f\x8a\x56\xac\x74\xbe\x4c\x28\x64\xb9\x89\x2f\xbb\x60\xbd\x7b\x65\x99\x98\x6f\x7e\xf1\x4e\x53\xf5\x0e\x93\x58\x74\x8e\xa5\x9f\x69\x51\xac\x98\x82\xa5\x42\x76\x5a\x16\x6a\x15\xa4\xbf\xdc\xb8\x52\xae\x75\x95\x7e\xc7\x9d\x9b\xef\xb2\x93\xb2\x2c\x67\xb5\x75\xc1\x71\x75\x40\x92\xe3\x6b\x85\x97\xcc\xce\x14\xa5\xb9\x82\xd9\x82\x65\x73\x9b\xf3\x04\xde\x79\xe6\x76\x84\x82\xd4\xf9\x85\x40\x28\x22\xc8\x35\x11\x7c\xdd\x05\x09\x0f\x06\x05\x11\x5c\x50\xd8\x33\xec\xdd\xa6\x45\x61\x5f\x3f\x7b\x89\xaf\x40\xe8\x83\xfe\x35\x51\xfb\x04\xbb\xa6\x9a\xb1\xa4\x81\x53\xfd\x3b\x66\x53\x8b\xc2\xbb\xae\xf3\xf3\xc6\x2e\x93\x48\x80\x9b\xd8\x46\xe7\x6d\x2d\xe0\xa3\x2e\xd5\x6f\xad\x05\x1c\xe6\xb5\x8b\x60\xcc\x58\x72\x94\xff\x5a\xc0\x49\x51\xa3\xc8\x40\x85\x35\xf2\x5f\x0b\xb8\x2d\x6a\x98\xdc\x38\x58\xae\x9f\x17\xf0\xaa\xdb\x1e\xad\xad\x11\xf4\xbb\x75\xb0\x7f\xd7\xad\x81\xfc\xc7\x6e\x4e\x24\x0d\x3b\x60\x0c\x39\x97\xed\x37\x4f\xba\xb5\xd4\xa9\xb7\xdd\x5a\x6a\x8f\x8f\xdd\x3a\xbc\x1f\x76\x17\x0b\x7d\x0d\x35\xb2\x3a\xa5\x4f\xef\x82\xc2\xdb\x3b\xb9\xed\xa5\xd8\x15\x1b\x33\xf2\xca\x20\xe0\x55\x17\xec\x4a\xcc\x88\x5b\x46\x44\xff\x06\xc3\xf5\x18\x9f\xa2\x9a\x87\x91\x06\x8c\x37\x48\xe8\xdf\x75\xe1\xa4\x0b\xb7\x5d\x38\xec\x82\x5a\xf1\x3c\x3c\xe1\x44\x24\x63\x2e\x2f\x79\x96\xf6\xc3\xe4\x91\x9f\x78\xa9\xde\xfc\x30\xbe\xd0\x0f\x63\x16\xb3\x0b\x2e\x1e\xe9\xad\xd9\xe5\xd1\xc4\x5a\x7c\xa7\x70\xb0\x1e\x87\x2f\xf9\x27\x18\x59\x82\xf9\x3e\xba\xfe\x59\xdb\xe8\x74\x97\x7b\xf8\x98\x48\x46\x62\xf5\x96\x16\x3e\x7b\x24\xee\x7b\xa7\x75\xbc\x6d\x0c\x9f\x3a\x93\x8a\x58\x71\xe0\x55\x2d\xa3\x3f\x77\x9d\x50\x10\x4b\x60\x90\x96\x2f\xdd\xf6\x24\xf7\x9f\x3c\xb0\x70\x39\x55\x1f\x18\xff\x0a\x3e\x77\x89\x95\xca\x79\xc4\xd3\x4b
\xce\x65\x61\x5d\x15\x25\xcc\x47\xcb\x2a\x41\x3c\x0c\xa0\x5d\x98\xe4\x71\x21\x12\x61\x8a\xe2\x8c\x58\x6f\x58\x18\x71\x5f\xd1\x61\xd5\xa6\xf3\xf2\xe8\xa8\x13\x88\x64\xac\x73\x45\x51\xe3\x05\xaa\x23\xb1\x1e\xc6\xe4\xa7\xf7\xca\xbe\x02\xef\xb3\x7d\xce\xc0\x3b\xb4\x1b\x19\xcb\xee\x48\x91\x11\x4d\x6c\xd9\xc2\x56\x3f\xb6\x81\xd9\x73\xf2\x35\x02\xeb\xbf\x2c\x20\xb1\x4e\x51\xc7\x9e\x80\x7e\x37\xb2\x14\x3f\x71\x8c\x6e\x91\xef\x23\xc5\x56\x64\xb0\xab\x48\xc3\x7c\x54\x06\xa6\xb4\xcb\x68\x95\x71\xdf\xf5\x14\x7f\xd9\x67\x19\x26\x03\x00\x6f\xdb\xee\x4a\xf2\x43\x50\xf0\x9e\xd9\xfb\x11\xf8\x6e\xc3\xc8\x14\x1f\x72\x48\xfe\x7c\xaf\x97\x1e\x39\x80\x9d\x0a\xc8\x58\x8b\xc5\x82\x3e\x67\x89\xf3\xf3\x80\x85\xb1\xfd\x33\x8c\x43\x69\xff\x10\xe4\x30\xa4\x64\xa0\x3e\x12\xf7\x5f\x47\xe3\x51\xde\x6f\xc7\x98\x4f\x05\x89\x20\xc8\xa3\x77\xc2\xb8\x23\x29\xfe\x11\x23\x0c\x84\x65\x39\x0e\x1f\x4d\xc8\x13\x6a\xc7\x44\xfc\xc9\xbf\x83\xfc\x93\x7f\xa7\xb6\x7a\x74\xd4\xe3\x82\x60\x97\xc0\x12\x6a\xe3\x93\xc3\x92\x05\x51\x6c\x10\x7d\xfe\xff\x06\x00\x00\xff\xff\xed\x2b\x91\x4f\xe5\xb1\x01\x00"), }, "/templates": &vfsgen۰DirInfo{ name: "templates", @@ -161,9 +161,9 @@ var Assets = func() http.FileSystem { "/templates/default.tmpl": &vfsgen۰CompressedFileInfo{ name: "default.tmpl", modTime: time.Date(1970, 1, 1, 0, 0, 1, 0, time.UTC), - uncompressedSize: 8101, + uncompressedSize: 8398, - compressedContent: 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff\xec\x59\xcf\x6f\xeb\x36\x0c\xbe\xe7\xaf\x20\xfc\x76\x68\x0e\xf5\x1b\x76\x2c\x50\x0c\x0f\xc3\x7e\x1c\xba\x61\x68\xd1\x5d\x86\x21\x50\x6d\xc6\x55\x2b\x4b\xae\x44\x27\x0d\xd2\xfc\xef\x83\x6c\xc7\x91\x2d\x27\x91\xd3\xec\xb4\xdc\x12\x99\xfc\x48\x7f\x1f\x4d\xca\xf2\x7a\x0d\x29\xce\xb9\x44\x88\x66\x33\x26\x50\x53\xce\x24\xcb\x50\x47\xb0\xd9\x7c\x73\xfe\xaf\xd7\x80\x32\x85\xcd\x66\xb2\xd7\xe5\xf1\xfe\xce\x7a\xad\xd7\x10\xff\xfc\x4e\xa8\x25\x13\x8f\xf7\x77\xb0\xd9\x7c\xfd\xf2\xb5\xb2\x33\x3f\x6a\x4c\x90\x2f\x50\xdf\x5a\xa3\xfb\xe6\x0f\x7c\x40\xa9\xc5\x5b\x89\x7a\x55\xbb\x37\x81\xba\x91\x4c\xf9\xf4\x82\x09\xd9\x08\x7f\x5b\xef\x07\x62\x54\x1a\xf8\x00\x52\x8f\x45\x81\xba\x76\xe5\x73\xc0\xb7\xf6\x62\x34\xe7\x9a\xcb\xcc\xfa\xdc\x58\x9f\xea\x86\x4c\xfc\x4b\xb5\x0a\x1f\x20\x50\xba\x11\xff\x01\x6b\xf4\xab\x56\x65\x71\xc7\x9e\x50\x98\xf8\x41\x69\xc2\xf4\x4f\xc6\xb5\x89\xff\x62\xa2\x44\x1b\xf0\x45\x71\x09\x11\x58\x54\xa8\x43\x66\x04\x57\x16\x2b\xfe\x49\xe5\xb9\x92\xb5\xf3\xb4\x59\x73\xf0\xa6\xb0\xd9\x5c\xad\xd7\xb0\xe4\xf4\xdc\x35\x8e\xef\x31\x57\x0b\xec\x46\xff\x83\xe5\x68\x1a\x46\x87\xa2\xb7\x89\x4f\xdb\x5f\x7b\x64\x4a\xd1\x24\x9a\x17\xc4\x95\x8c\x0e\x70\x4c\xf8\x4e\xb5\xa4\x33\xc1\x0d\x35\xa6\x9a\xc9\x0c\x21\x86\xcd\xa6\xce\xeb\x66\xb2\x5b\xf4\x79\xb2\xac\x5c\x57\x44\xda\xf4\xed\xbf\x5b\x68\x6f\xa0\x49\xac\x0e\xfe\x4d\x4a\x45\xcc\xe6\xd4\x81\x74\x96\x4f\xc3\x7d\x50\xa5\x4e\xf0\xa6\x16\x13\x25\x6a\x46\x4a\xd7\x95\x38\x19\x20\xea\x20\x05\xb3\x9c\xe9\xd7\x54\x2d\xa5\xc7\xc5\x24\x94\x8c\xc0\xac\x27\xe3\xe9\x08\x45\x0e\x22\x64\x32\xcc\x88\x11\x2c\x79\x8d\x53\x9c\xb3\x52\x50\x4c\x9c\x04\x36\x54\x10\xe6\x85\x60\xd4\x7d\x38\xe3\x7d\x35\xd8\xc5\x29\x8d\x6d\x0f\xf9\x10\x54\xb7\x09\x05\xe2\xcd\x99\x10\x4f\x2c\x79\xf5\xf0\x06\xd3\xb7\xa0\xf0\x01\xc7\x0c\x05\x97\xaf\xc1\x19\x24\x4d\x06\x3c\x8d\xc2\x1c\x0a\x8d\xb6\xd6\x02\xad\x9d\x84\x0e\x32\x56\xf5\xe0\xc0\x94\x79\xa2\x24\xe6\xea\x85\x47\xe1\xf6\xa5\x16\xa1\x19\x87\xdf\xdc\x5c\x29\xaa\x27\x8e\x53
\x83\xae\x79\x61\x6f\x2d\x2d\x69\xd5\xba\xf8\x0d\x6d\x5c\x39\xfa\x88\x89\xe0\x28\xe9\xf4\x82\xdc\x87\xb8\x9b\x8a\xa7\x69\xe6\xe3\x72\x69\x88\xc9\x04\xcd\x00\xae\xd7\xc1\xe3\xfd\xac\xaa\xc2\x64\x28\x39\xb6\xc0\x39\x1a\xc3\xb2\xd3\x9e\x6f\x0f\xcc\x57\xa8\x19\x78\x7b\x1a\xda\xe0\x84\x9b\xf4\xe6\x6b\x67\x80\x4f\xe1\x7b\xb8\xb6\x8d\xb3\x5a\x84\x7a\xb1\x6a\x9d\x87\x19\xe9\xee\x02\xaa\x20\xd7\xce\x1d\x0d\xc4\xbb\x47\xa3\xc4\x02\xd3\x5e\xc4\xed\x72\x78\xcc\xad\x87\x17\xf5\x3a\x84\x52\x53\xf5\xf1\xf1\xd5\xd4\x51\x7d\x89\xc9\x33\xa3\xb1\x9a\x4f\x2e\xfa\x1d\xd0\xcf\xdd\x28\x3f\x6a\xe1\xe1\x0d\xea\xb3\x47\xf5\x9e\x3e\xa4\x66\x76\x58\xee\xed\xa4\xbe\x79\xc1\x34\xad\x46\xd8\x13\xcb\x42\xad\x59\x86\x92\x66\xfd\x11\xd7\xad\xaf\x05\x4f\x48\x69\x55\x98\x5d\xd9\x12\x23\x9c\x75\x0b\xed\x52\x4b\xe3\x7a\x81\xcf\x2a\x4a\xe2\xb4\x9a\xa5\xdc\x14\x82\xad\x66\x7b\x76\x53\xc7\x1b\xb7\x8f\x9c\x2b\xc9\x49\x59\x42\x66\xa4\x94\x18\x39\x12\x3b\xb3\xab\x34\xcf\x6a\x81\xfa\x0c\xfb\x47\x0f\xea\xbf\xaf\xa7\xf3\x94\x53\x78\x35\x9d\xaf\x98\xfc\x2d\xfd\x21\x26\x77\x7b\xba\x31\x33\xc5\xdd\xcd\x49\xe7\x61\xdf\xbd\xa6\x8f\x7f\x47\x70\x70\x2e\xf2\x8e\x91\xd7\x65\x91\x50\x60\xa6\x59\x3e\x44\xe5\xff\x96\x94\x94\x9b\x44\xe9\x74\xb7\x37\x57\x92\x76\xdb\x7d\xbf\x14\xfb\xf6\xa7\x37\xae\x3e\xd2\x45\x0d\xbb\xad\x78\xc2\xf7\xcb\xa3\xfe\x69\x1e\x73\x43\xc8\x72\xb7\xf9\xe6\x39\xd3\xab\x93\xea\xb4\x8f\x75\x7a\xc5\x7b\x48\xcd\x49\x40\x88\x4c\x5f\x60\x94\x50\xce\xf1\xdc\xa7\x15\x6b\x43\x87\x6a\x36\x10\xfc\x04\xf1\x16\x3f\x9c\x8f\x72\x17\xeb\x42\xfa\x10\xe9\x2f\x5c\xb3\xb3\x3c\x2e\x1d\xa0\xde\x59\xc7\x85\xf3\x49\xf5\x1a\x33\xc8\x55\xa1\xb9\xd2\xdc\xbe\xa1\x5e\x37\x6f\x3b\xdf\x6d\x97\xe0\xe6\x16\xa2\x68\xfb\x12\xb4\x3d\xff\xee\xdc\xad\xf5\x01\x00\xa8\xfc\x0c\x2e\x70\xeb\xc7\x65\x8a\xef\xdb\x23\x78\x88\xb6\x97\xa2\x8e\x07\x9f\xc3\x15\xbe\x39\x8e\x51\xa2\x39\xf1\x84\x89\x68\xda\x1a\xb6\xf0\x6d\x5a\xb7\x10\xfd\xc6\xb3\xe7\x2e\x16\x0a\x83\x15\x20\x93\x69\x1f\x75\xc9\xb4\xe4\x32\x8b\xa6\x70\x25\xd1\x01\xaa\x61\xa6\x47\x62\xfd\x8e
\x29\x2f\xf3\xf0\x68\x5c\xce\x95\x0d\x65\x57\x77\xa1\x8e\x86\xb9\x53\xcb\x5e\x0c\x99\xb6\x9a\xb8\xbf\xeb\x6f\x6a\x2e\x74\xc7\xad\xab\x53\x5b\x18\x5e\xec\x51\x6a\x8d\x56\x2c\x40\xb5\xb3\x2b\x17\xa4\xde\xf9\x14\x3c\xae\x62\x5f\xc9\x63\xca\xee\x90\xfa\x57\xdd\x56\xa7\x55\xf2\x8a\xd4\x3d\x36\x3a\x79\x52\x0d\x80\x31\xc1\x99\x39\xfd\xe0\x7d\x5f\x7a\x9f\xfe\x5a\x32\x00\x7c\xf8\x73\xc9\x80\xc3\xb1\x6f\x26\x43\xc9\x7b\x1f\x4e\xfe\x0d\x00\x00\xff\xff\x74\x5d\xc4\xb5\xa5\x1f\x00\x00"), + compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff\xec\x59\xcf\x6f\xeb\x36\x0c\xbe\xe7\xaf\x10\xfc\x76\x68\x0e\xf5\x1b\x76\x2c\x50\x0c\x0f\xc3\x7e\x1c\xba\x61\x68\xd1\x5d\x86\x21\x50\x6d\xc6\x55\x2b\x4b\xae\x44\x27\x0d\xd2\xfc\xef\x83\x6c\xc7\x91\x2c\x3b\x91\xdd\xec\xb4\xdc\x12\x99\xfc\x48\x7f\x1f\x4d\xca\xf2\x76\x4b\x52\x58\x32\x01\x24\x5a\x2c\x28\x07\x85\x39\x15\x34\x03\x15\x91\xdd\xee\x9b\xf5\x7f\xbb\x25\x20\x52\xb2\xdb\xcd\x06\x5d\x1e\xef\xef\x8c\xd7\x76\x4b\xe2\x9f\xdf\x11\x94\xa0\xfc\xf1\xfe\x8e\xec\x76\x5f\xbf\x7c\xad\xec\xf4\x8f\x0a\x12\x60\x2b\x50\xb7\xc6\xe8\xbe\xf9\x43\x3e\x48\xa9\xf8\x5b\x09\x6a\x53\xbb\x37\x81\xdc\x48\xba\x7c\x7a\x81\x04\x4d\x84\xbf\x8d\xf7\x03\x52\x2c\x35\xf9\x20\x28\x1f\x8b\x02\x54\xed\xca\x96\x04\xde\xda\x8b\xd1\x92\x29\x26\x32\xe3\x73\x63\x7c\xaa\x1b\xd2\xf1\x2f\xd5\x2a\xf9\x20\x1c\x84\x1d\xf1\x1f\x62\x8c\x7e\x55\xb2\x2c\xee\xe8\x13\x70\x1d\x3f\x48\x85\x90\xfe\x49\x99\xd2\xf1\x5f\x94\x97\x60\x02\xbe\x48\x26\x48\x44\x0c\x2a\xa9\x43\x66\x48\xae\x0c\x56\xfc\x93\xcc\x73\x29\x6a\xe7\x79\xb3\x66\xe1\xcd\xc9\x6e\x77\xb5\xdd\x92\x35\xc3\x67\xd7\x38\xbe\x87\x5c\xae\xc0\x8d\xfe\x07\xcd\x41\x37\x8c\xf6\x45\x6f\x13\x9f\xb7\xbf\x06\x64\x4a\x41\x27\x8a\x15\xc8\xa4\x88\x8e\x70\x8c\xf0\x8e\xb5\xa4\x0b\xce\x34\x36\xa6\x8a\x8a\x0c\x48\x4c\x76\xbb\x3a\xaf\x9b\xd9\x61\xd1\xe7\xc9\xb0\x72\x5d\x11\x69\xd2\x37\xff\x6e\x49\x7b\x03\x4d\x62\x75\xf0\x6f\x42\x48\xa4\x26\x27\x07\xd2\x5a\x9e\x86\xfb\x20\x4b\x95\xc0\x4d\x2d\x26\x08\x50\x14\xa5\xaa\x2b\x71\xd6\x43\xd4\x51\x0
a\x16\x39\x55\xaf\xa9\x5c\x0b\x8f\x8b\x59\x28\x19\x81\x59\xcf\xc6\xd3\x11\x8a\x1c\x44\xc8\xac\x9f\x11\xcd\x69\xf2\x1a\xa7\xb0\xa4\x25\xc7\x18\x19\x72\x68\xa8\x40\xc8\x0b\x4e\xd1\x7d\x38\xe3\xa1\x1a\x74\x71\x4a\x6d\xda\x43\xde\x07\xe5\x36\xa1\x40\xbc\x25\xe5\xfc\x89\x26\xaf\x1e\x5e\x6f\xfa\x06\x94\x7c\x90\x53\x86\x9c\x89\xd7\xe0\x0c\x92\x26\x03\x96\x46\x61\x0e\x85\x02\x53\x6b\x81\xd6\x56\x42\x47\x19\xab\x7a\x70\x60\xca\x2c\x91\x02\x72\xf9\xc2\xa2\x70\xfb\x52\xf1\xd0\x8c\xc3\x6f\x6e\x29\x25\xd6\x13\xc7\xaa\x41\xdb\xbc\x30\xb7\x96\x96\xb8\x69\x5d\xfc\x86\x36\xae\x1c\x7d\xc4\x84\x33\x10\x38\xbd\x20\x87\x10\x0f\x53\x71\x9a\x66\x3e\x2e\x13\x1a\xa9\x48\x40\xf7\xe0\x7a\x1d\x3c\x1e\x66\x55\x16\x3a\x03\xc1\xa0\x05\xce\x41\x6b\x9a\x4d\x7b\xbe\x3d\x30\x5f\xa1\x66\xe0\x0d\x34\xb4\xde\x09\x37\xeb\xcc\x57\x67\x80\xcf\xc9\xf7\xe4\xda\x34\xce\x6a\x91\xd4\x8b\x55\xeb\x3c\xce\x88\xbb\x0b\xa8\x82\x5c\x5b\x77\xd4\x13\xef\x1e\xb4\xe4\x2b\x48\x3b\x11\xf7\xcb\xe1\x31\xf7\x1e\x5e\xd4\xeb\x10\x4a\x75\xd5\xc7\xc7\x57\x93\xa3\xfa\x1a\x92\x67\x8a\x63\x35\x9f\x5d\xf4\x3b\xa2\x9f\xbd\x51\x7e\x54\xdc\xc3\xeb\xd5\x67\x40\xf5\x8e\x3e\x28\x17\x66\x58\x0e\x76\x52\xdf\xbc\xa0\x0a\x37\x23\xec\x91\x66\xa1\xd6\x34\x03\x81\x8b\xee\x88\x73\xeb\x6b\xc5\x12\x94\x4a\x16\xfa\x50\xb6\x48\x11\x16\x6e\xa1\x5d\x6a\x69\x5c\x2f\xf0\x59\x05\x81\x0c\x37\x8b\x94\xe9\x82\xd3\xcd\x62\x60\x37\x75\xba\x71\xfb\xc8\xb9\x14\x0c\xa5\x21\x64\x81\x52\xf2\x91\x23\xd1\x99\x5d\xa5\x7e\x96\x2b\x50\x67\xd8\x3f\x7a\x50\xff\x7d\x3d\x9d\xa7\x9c\xc2\xab\xe9\x7c\xc5\xe4\x6f\xe9\x8f\x31\x79\xd8\xd3\x8d\x99\x29\xf6\x6e\x4e\x58\x0f\xfb\xe1\x35\x7d\xfc\x3b\x82\x85\x73\x91\x77\x8c\xbc\x36\x8b\x08\x1c\x32\x45\xf3\x3e\x2a\xff\xb7\xa4\xa4\x4c\x27\x52\xa5\x87\xbd\xb9\x14\x78\xd8\xee\xfb\xa5\xd8\xb5\x9f\xde\xb8\xba\x48\x17\x35\xcc\xb6\xe2\x09\xde\x2f\x8f\xfa\xa7\x79\xcc\x35\x02\xcd\xed\xe6\x9b\xe7\x54\x6d\x26\xd5\x69\x17\x6b\x7a\xc5\x7b\x48\xcd\x49\x40\x88\x4c\x5f\xc8\x28\xa1\xac\xe3\xb9\x4f\x2b\xd6\x86\x0e\xd5\xac\x27\xf8\x04\xf1\x56\x3f\x9c\x8f\x7
2\x1b\xeb\x42\x7a\x1f\xe9\x2f\x4c\xd1\xb3\x3c\x2e\x0e\x50\xe7\xac\xe3\xc2\xf9\xac\x7a\x8d\xe9\xe5\xaa\x50\x4c\x2a\x66\xde\x50\xaf\x9b\xb7\x9d\xef\xf6\x4b\xe4\xe6\x96\x44\xd1\xfe\x25\x68\x7f\xfe\xed\xdc\xad\xf1\x21\x84\x90\xca\x4f\xc3\x0a\xf6\x7e\x4c\xa4\xf0\xbe\x3f\x82\x27\xd1\xfe\x52\xe4\x78\xb0\x25\xb9\x82\x37\xcb\x31\x4a\x14\x43\x96\x50\x1e\xcd\x5b\xc3\x16\xbe\x4d\xeb\x96\x44\xbf\xb1\xec\xd9\xc5\x02\xae\xa1\x02\xa4\x22\xed\xa2\xae\xa9\x12\x4c\x64\xd1\x9c\x5c\x09\xb0\x80\x6a\x98\xf9\x89\x58\xbf\x43\xca\xca\x3c\x3c\x1a\x13\x4b\x69\x42\x99\xd5\x43\xa8\x93\x61\xee\xe4\xba\x13\x43\xa4\xad\x26\xf6\xef\xfa\x9b\x9a\x0d\xed\xb8\xb9\x3a\xb5\x85\xe1\xc5\x1e\xa5\xd6\x68\xc5\x02\x54\x3b\xbb\x72\x41\xea\x9d\x4f\xc1\xd3\x2a\x76\x95\x3c\xa5\xec\x01\xa9\x7b\xd5\x6e\x75\x4a\x26\xaf\x80\xee\xb1\xd1\xe4\x49\xd5\x03\x46\x39\xa3\x7a\xfa\xc1\xfb\x50\x7a\x9f\xfe\x5a\xd2\x03\x7c\xfc\x73\x49\x8f\xc3\xa9\x6f\x26\x7d\xc9\x7b\x1f\x4e\x9c\x49\x4f\x11\x41\xe5\x52\xe3\x65\xd4\x7b\x63\xe7\xdf\x00\x00\x00\xff\xff\xa7\x3a\x7d\xf7\xce\x20\x00\x00"), }, "/templates/email.tmpl": &vfsgen۰CompressedFileInfo{ name: "email.tmpl", diff --git a/vendor/github.com/prometheus/alertmanager/featurecontrol/featurecontrol.go b/vendor/github.com/prometheus/alertmanager/featurecontrol/featurecontrol.go index 1df215acb1b..b4b7cf4735c 100644 --- a/vendor/github.com/prometheus/alertmanager/featurecontrol/featurecontrol.go +++ b/vendor/github.com/prometheus/alertmanager/featurecontrol/featurecontrol.go @@ -113,7 +113,7 @@ func NewFlags(logger *slog.Logger, features string) (Flagger, error) { return NoopFlags{}, nil } - for _, feature := range strings.Split(features, ",") { + for feature := range strings.SplitSeq(features, ",") { switch feature { case FeatureReceiverNameInMetrics: opts = append(opts, enableReceiverNameInMetrics()) @@ -131,7 +131,7 @@ func NewFlags(logger *slog.Logger, features string) (Flagger, error) { opts = append(opts, enableAutoGOMAXPROCS()) logger.Warn("Automatically set GOMAXPROCS to match Linux 
container CPU quota") default: - return nil, fmt.Errorf("Unknown option '%s' for --enable-feature", feature) + return nil, fmt.Errorf("unknown option '%s' for --enable-feature", feature) } } diff --git a/vendor/github.com/prometheus/alertmanager/matcher/compat/parse.go b/vendor/github.com/prometheus/alertmanager/matcher/compat/parse.go index 667b6cd7cb1..7ba385e648f 100644 --- a/vendor/github.com/prometheus/alertmanager/matcher/compat/parse.go +++ b/vendor/github.com/prometheus/alertmanager/matcher/compat/parse.go @@ -138,7 +138,7 @@ func FallbackMatcherParser(l *slog.Logger) ParseMatcher { } // If the input is valid in both parsers, but produces different results, // then there is disagreement. - if nErr == nil && cErr == nil && !reflect.DeepEqual(nMatcher, cMatcher) { + if cErr == nil && !reflect.DeepEqual(nMatcher, cMatcher) { l.Warn("Matchers input has disagreement", "input", input, "origin", origin) return cMatcher, nil } @@ -179,7 +179,7 @@ func FallbackMatchersParser(l *slog.Logger) ParseMatchers { // If the input is valid in both parsers, but produces different results, // then there is disagreement. We need to compare to labels.Matchers(cMatchers) // as cMatchers is a []*labels.Matcher not labels.Matchers. - if nErr == nil && cErr == nil && !reflect.DeepEqual(nMatchers, labels.Matchers(cMatchers)) { + if cErr == nil && !reflect.DeepEqual(nMatchers, labels.Matchers(cMatchers)) { l.Warn("Matchers input has disagreement", "input", input, "origin", origin) return cMatchers, nil } @@ -190,7 +190,7 @@ func FallbackMatchersParser(l *slog.Logger) ParseMatchers { // isValidClassicLabelName returns true if the string is a valid classic label name. 
func isValidClassicLabelName(_ *slog.Logger) func(model.LabelName) bool { return func(name model.LabelName) bool { - return name.IsValid() + return model.LegacyValidation.IsValidLabelName(string(name)) } } diff --git a/vendor/github.com/prometheus/alertmanager/matcher/parse/token.go b/vendor/github.com/prometheus/alertmanager/matcher/parse/token.go index 96baeeef43b..3e73fb8536a 100644 --- a/vendor/github.com/prometheus/alertmanager/matcher/parse/token.go +++ b/vendor/github.com/prometheus/alertmanager/matcher/parse/token.go @@ -16,6 +16,7 @@ package parse import ( "errors" "fmt" + "slices" "strconv" "unicode/utf8" ) @@ -73,12 +74,7 @@ func (t token) isEOF() bool { // isOneOf returns true if the token is one of the specified kinds. func (t token) isOneOf(kinds ...tokenKind) bool { - for _, k := range kinds { - if k == t.kind { - return true - } - } - return false + return slices.Contains(kinds, t.kind) } // unquote the value in token. If unquoted returns it unmodified. diff --git a/vendor/github.com/prometheus/alertmanager/pkg/labels/parse.go b/vendor/github.com/prometheus/alertmanager/pkg/labels/parse.go index f84b29e84b1..13360174c90 100644 --- a/vendor/github.com/prometheus/alertmanager/pkg/labels/parse.go +++ b/vendor/github.com/prometheus/alertmanager/pkg/labels/parse.go @@ -127,8 +127,8 @@ func ParseMatcher(s string) (_ *Matcher, err error) { expectTrailingQuote bool ) - if strings.HasPrefix(rawValue, "\"") { - rawValue = strings.TrimPrefix(rawValue, "\"") + if after, ok := strings.CutPrefix(rawValue, "\""); ok { + rawValue = after expectTrailingQuote = true } diff --git a/vendor/github.com/prometheus/alertmanager/template/default.tmpl b/vendor/github.com/prometheus/alertmanager/template/default.tmpl index 57e877c0c21..cdbceb5324b 100644 --- a/vendor/github.com/prometheus/alertmanager/template/default.tmpl +++ b/vendor/github.com/prometheus/alertmanager/template/default.tmpl @@ -217,3 +217,14 @@ Alerts Resolved: {{ define "rocketchat.default.emoji" }}{{ end 
}} {{ define "rocketchat.default.iconurl" }}{{ end }} {{ define "rocketchat.default.text" }}{{ end }} + +{{ define "mattermost.default.text" }} +{{ if gt (len .Alerts.Firing) 0 }} +# Alerts Firing: +{{ template "__text_alert_list_markdown" .Alerts.Firing }} +{{ end }} +{{ if gt (len .Alerts.Resolved) 0 }} +# Alerts Resolved: +{{ template "__text_alert_list_markdown" .Alerts.Resolved }} +{{ end }} +{{ end }} \ No newline at end of file diff --git a/vendor/github.com/prometheus/alertmanager/template/template.go b/vendor/github.com/prometheus/alertmanager/template/template.go index 92f8323e256..b361ec03de5 100644 --- a/vendor/github.com/prometheus/alertmanager/template/template.go +++ b/vendor/github.com/prometheus/alertmanager/template/template.go @@ -15,11 +15,13 @@ package template import ( "bytes" + "encoding/json" tmplhtml "html/template" "io" "net/url" "path" "path/filepath" + "reflect" "regexp" "sort" "strings" @@ -30,6 +32,7 @@ import ( "github.com/prometheus/common/model" "golang.org/x/text/cases" "golang.org/x/text/language" + "gopkg.in/yaml.v2" "github.com/prometheus/alertmanager/asset" "github.com/prometheus/alertmanager/types" @@ -131,7 +134,7 @@ func (t *Template) FromGlob(path string) error { } // ExecuteTextString needs a meaningful doc comment (TODO(fabxc)). -func (t *Template) ExecuteTextString(text string, data interface{}) (string, error) { +func (t *Template) ExecuteTextString(text string, data any) (string, error) { if text == "" { return "", nil } @@ -149,7 +152,7 @@ func (t *Template) ExecuteTextString(text string, data interface{}) (string, err } // ExecuteHTMLString needs a meaningful doc comment (TODO(fabxc)). 
-func (t *Template) ExecuteHTMLString(html string, data interface{}) (string, error) { +func (t *Template) ExecuteHTMLString(html string, data any) (string, error) { if html == "" { return "", nil } @@ -166,7 +169,7 @@ func (t *Template) ExecuteHTMLString(html string, data interface{}) (string, err return buf.String(), err } -type FuncMap map[string]interface{} +type FuncMap map[string]any var DefaultFuncs = FuncMap{ "toUpper": strings.ToUpper, @@ -186,6 +189,10 @@ var DefaultFuncs = FuncMap{ "safeHtml": func(text string) tmplhtml.HTML { return tmplhtml.HTML(text) }, + "safeUrl": func(text string) tmplhtml.URL { + return tmplhtml.URL(text) + }, + "urlUnescape": url.QueryUnescape, "reReplaceAll": func(pattern, repl, text string) string { re := regexp.MustCompile(pattern) return re.ReplaceAllString(text, repl) @@ -207,6 +214,13 @@ var DefaultFuncs = FuncMap{ }, "since": time.Since, "humanizeDuration": commonTemplates.HumanizeDuration, + "toJson": func(v any) (string, error) { + bytes, err := json.Marshal(v) + if err != nil { + return "", err + } + return string(bytes), nil + }, } // Pair is a key/value string pair. @@ -423,3 +437,66 @@ func (t *Template) Data(recv string, groupLabels model.LabelSet, alerts ...*type return data } + +type TemplateFunc func(string) (string, error) + +// DeepCopyWithTemplate returns a deep copy of a map/slice/array/string/int/bool or combination thereof, executing the +// provided template (with the provided data) on all string keys or values. All maps are connverted to +// map[string]any, with all non-string keys discarded. 
+func DeepCopyWithTemplate(value any, tmplTextFunc TemplateFunc) (any, error) { + if value == nil { + return value, nil + } + + valueMeta := reflect.ValueOf(value) + switch valueMeta.Kind() { + + case reflect.String: + parsed, ok := tmplTextFunc(value.(string)) + if ok == nil { + var inlineType any + err := yaml.Unmarshal([]byte(parsed), &inlineType) + if err != nil || (inlineType != nil && reflect.TypeOf(inlineType).Kind() == reflect.String) { + // ignore error, thus the string is not an interface + return parsed, ok + } + return DeepCopyWithTemplate(inlineType, tmplTextFunc) + } + return parsed, ok + + case reflect.Array, reflect.Slice: + arrayLen := valueMeta.Len() + converted := make([]any, arrayLen) + for i := range arrayLen { + var err error + converted[i], err = DeepCopyWithTemplate(valueMeta.Index(i).Interface(), tmplTextFunc) + if err != nil { + return nil, err + } + } + return converted, nil + + case reflect.Map: + keys := valueMeta.MapKeys() + converted := make(map[string]any, len(keys)) + + for _, keyMeta := range keys { + var err error + strKey, isString := keyMeta.Interface().(string) + if !isString { + continue + } + strKey, err = tmplTextFunc(strKey) + if err != nil { + return nil, err + } + converted[strKey], err = DeepCopyWithTemplate(valueMeta.MapIndex(keyMeta).Interface(), tmplTextFunc) + if err != nil { + return nil, err + } + } + return converted, nil + default: + return value, nil + } +} diff --git a/vendor/github.com/prometheus/alertmanager/types/types.go b/vendor/github.com/prometheus/alertmanager/types/types.go index 727ac320e3e..2cf9dd4501d 100644 --- a/vendor/github.com/prometheus/alertmanager/types/types.go +++ b/vendor/github.com/prometheus/alertmanager/types/types.go @@ -14,6 +14,7 @@ package types import ( + "context" "fmt" "strings" "sync" @@ -471,7 +472,7 @@ func (a *Alert) Merge(o *Alert) *Alert { // maintain an underlying AlertMarker are expected to update it during a call of // Mutes. 
type Muter interface { - Mutes(model.LabelSet) bool + Mutes(ctx context.Context, lset model.LabelSet) bool } // A TimeMuter determines if the time is muted by one or more active or mute @@ -482,10 +483,10 @@ type TimeMuter interface { } // A MuteFunc is a function that implements the Muter interface. -type MuteFunc func(model.LabelSet) bool +type MuteFunc func(ctx context.Context, lset model.LabelSet) bool // Mutes implements the Muter interface. -func (f MuteFunc) Mutes(lset model.LabelSet) bool { return f(lset) } +func (f MuteFunc) Mutes(ctx context.Context, lset model.LabelSet) bool { return f(ctx, lset) } // A Silence determines whether a given label set is muted. type Silence struct { diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go index 7b762370e27..8f8dc65d38e 100644 --- a/vendor/github.com/prometheus/common/expfmt/decode.go +++ b/vendor/github.com/prometheus/common/expfmt/decode.go @@ -220,7 +220,7 @@ func extractSamples(f *dto.MetricFamily, o *DecodeOptions) (model.Vector, error) return extractSummary(o, f), nil case dto.MetricType_UNTYPED: return extractUntyped(o, f), nil - case dto.MetricType_HISTOGRAM: + case dto.MetricType_HISTOGRAM, dto.MetricType_GAUGE_HISTOGRAM: return extractHistogram(o, f), nil } return nil, fmt.Errorf("expfmt.extractSamples: unknown metric family type %v", f.GetType()) @@ -403,9 +403,13 @@ func extractHistogram(o *DecodeOptions, f *dto.MetricFamily) model.Vector { infSeen = true } + v := q.GetCumulativeCountFloat() + if v <= 0 { + v = float64(q.GetCumulativeCount()) + } samples = append(samples, &model.Sample{ Metric: model.Metric(lset), - Value: model.SampleValue(q.GetCumulativeCount()), + Value: model.SampleValue(v), Timestamp: timestamp, }) } @@ -428,9 +432,13 @@ func extractHistogram(o *DecodeOptions, f *dto.MetricFamily) model.Vector { } lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count") + v := m.Histogram.GetSampleCountFloat() + if 
v <= 0 { + v = float64(m.Histogram.GetSampleCount()) + } count := &model.Sample{ Metric: model.Metric(lset), - Value: model.SampleValue(m.Histogram.GetSampleCount()), + Value: model.SampleValue(v), Timestamp: timestamp, } samples = append(samples, count) diff --git a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go index 8dbf6d04ed6..21b93bca362 100644 --- a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go +++ b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go @@ -160,38 +160,38 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily, options ...E n, err = w.WriteString("# HELP ") written += n if err != nil { - return + return written, err } n, err = writeName(w, compliantName) written += n if err != nil { - return + return written, err } err = w.WriteByte(' ') written++ if err != nil { - return + return written, err } n, err = writeEscapedString(w, *in.Help, true) written += n if err != nil { - return + return written, err } err = w.WriteByte('\n') written++ if err != nil { - return + return written, err } } n, err = w.WriteString("# TYPE ") written += n if err != nil { - return + return written, err } n, err = writeName(w, compliantName) written += n if err != nil { - return + return written, err } switch metricType { case dto.MetricType_COUNTER: @@ -208,39 +208,41 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily, options ...E n, err = w.WriteString(" unknown\n") case dto.MetricType_HISTOGRAM: n, err = w.WriteString(" histogram\n") + case dto.MetricType_GAUGE_HISTOGRAM: + n, err = w.WriteString(" gaugehistogram\n") default: return written, fmt.Errorf("unknown metric type %s", metricType.String()) } written += n if err != nil { - return + return written, err } if toOM.withUnit && in.Unit != nil { n, err = w.WriteString("# UNIT ") written += n if err != nil { - return + return written, err } n, err = writeName(w, 
compliantName) written += n if err != nil { - return + return written, err } err = w.WriteByte(' ') written++ if err != nil { - return + return written, err } n, err = writeEscapedString(w, *in.Unit, true) written += n if err != nil { - return + return written, err } err = w.WriteByte('\n') written++ if err != nil { - return + return written, err } } @@ -304,7 +306,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily, options ...E ) written += n if err != nil { - return + return written, err } } n, err = writeOpenMetricsSample( @@ -314,7 +316,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily, options ...E ) written += n if err != nil { - return + return written, err } n, err = writeOpenMetricsSample( w, compliantName, "_count", metric, "", 0, @@ -325,7 +327,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily, options ...E createdTsBytesWritten, err = writeOpenMetricsCreated(w, compliantName, "", metric, "", 0, metric.Summary.GetCreatedTimestamp()) n += createdTsBytesWritten } - case dto.MetricType_HISTOGRAM: + case dto.MetricType_HISTOGRAM, dto.MetricType_GAUGE_HISTOGRAM: if metric.Histogram == nil { return written, fmt.Errorf( "expected histogram in metric %s %s", compliantName, metric, @@ -333,6 +335,12 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily, options ...E } infSeen := false for _, b := range metric.Histogram.Bucket { + if b.GetCumulativeCountFloat() > 0 { + return written, fmt.Errorf( + "OpenMetrics v1.0 does not support float histogram %s %s", + compliantName, metric, + ) + } n, err = writeOpenMetricsSample( w, compliantName, "_bucket", metric, model.BucketLabel, b.GetUpperBound(), @@ -341,7 +349,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily, options ...E ) written += n if err != nil { - return + return written, err } if math.IsInf(b.GetUpperBound(), +1) { infSeen = true @@ -354,9 +362,12 @@ func MetricFamilyToOpenMetrics(out io.Writer, in 
*dto.MetricFamily, options ...E 0, metric.Histogram.GetSampleCount(), true, nil, ) + // We do not check for a float sample count here + // because we will check for it below (and error + // out if needed). written += n if err != nil { - return + return written, err } } n, err = writeOpenMetricsSample( @@ -366,7 +377,13 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily, options ...E ) written += n if err != nil { - return + return written, err + } + if metric.Histogram.GetSampleCountFloat() > 0 { + return written, fmt.Errorf( + "OpenMetrics v1.0 does not support float histogram %s %s", + compliantName, metric, + ) } n, err = writeOpenMetricsSample( w, compliantName, "_count", metric, "", 0, @@ -384,10 +401,10 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily, options ...E } written += n if err != nil { - return + return written, err } } - return + return written, err } // FinalizeOpenMetrics writes the final `# EOF\n` line required by OpenMetrics. diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go index c4e9c1bbc3a..6b897814564 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_create.go +++ b/vendor/github.com/prometheus/common/expfmt/text_create.go @@ -108,38 +108,38 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err e n, err = w.WriteString("# HELP ") written += n if err != nil { - return + return written, err } n, err = writeName(w, name) written += n if err != nil { - return + return written, err } err = w.WriteByte(' ') written++ if err != nil { - return + return written, err } n, err = writeEscapedString(w, *in.Help, false) written += n if err != nil { - return + return written, err } err = w.WriteByte('\n') written++ if err != nil { - return + return written, err } } n, err = w.WriteString("# TYPE ") written += n if err != nil { - return + return written, err } n, err = writeName(w, name) written += 
n if err != nil { - return + return written, err } metricType := in.GetType() switch metricType { @@ -151,14 +151,17 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err e n, err = w.WriteString(" summary\n") case dto.MetricType_UNTYPED: n, err = w.WriteString(" untyped\n") - case dto.MetricType_HISTOGRAM: + case dto.MetricType_HISTOGRAM, dto.MetricType_GAUGE_HISTOGRAM: + // The classic Prometheus text format has no notion of a gauge + // histogram. We render a gauge histogram in the same way as a + // regular histogram. n, err = w.WriteString(" histogram\n") default: return written, fmt.Errorf("unknown metric type %s", metricType.String()) } written += n if err != nil { - return + return written, err } // Finally the samples, one line for each. @@ -208,7 +211,7 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err e ) written += n if err != nil { - return + return written, err } } n, err = writeSample( @@ -217,13 +220,13 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err e ) written += n if err != nil { - return + return written, err } n, err = writeSample( w, name, "_count", metric, "", 0, float64(metric.Summary.GetSampleCount()), ) - case dto.MetricType_HISTOGRAM: + case dto.MetricType_HISTOGRAM, dto.MetricType_GAUGE_HISTOGRAM: if metric.Histogram == nil { return written, fmt.Errorf( "expected histogram in metric %s %s", name, metric, @@ -231,28 +234,36 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err e } infSeen := false for _, b := range metric.Histogram.Bucket { + v := b.GetCumulativeCountFloat() + if v == 0 { + v = float64(b.GetCumulativeCount()) + } n, err = writeSample( w, name, "_bucket", metric, model.BucketLabel, b.GetUpperBound(), - float64(b.GetCumulativeCount()), + v, ) written += n if err != nil { - return + return written, err } if math.IsInf(b.GetUpperBound(), +1) { infSeen = true } } if !infSeen { + v := 
metric.Histogram.GetSampleCountFloat() + if v == 0 { + v = float64(metric.Histogram.GetSampleCount()) + } n, err = writeSample( w, name, "_bucket", metric, model.BucketLabel, math.Inf(+1), - float64(metric.Histogram.GetSampleCount()), + v, ) written += n if err != nil { - return + return written, err } } n, err = writeSample( @@ -261,12 +272,13 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err e ) written += n if err != nil { - return + return written, err } - n, err = writeSample( - w, name, "_count", metric, "", 0, - float64(metric.Histogram.GetSampleCount()), - ) + v := metric.Histogram.GetSampleCountFloat() + if v == 0 { + v = float64(metric.Histogram.GetSampleCount()) + } + n, err = writeSample(w, name, "_count", metric, "", 0, v) default: return written, fmt.Errorf( "unexpected type in metric %s %s", name, metric, @@ -274,10 +286,10 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err e } written += n if err != nil { - return + return written, err } } - return + return written, err } // writeSample writes a single sample in text format to w, given the metric diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go index 8f2edde3244..00c8841a108 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_parse.go +++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go @@ -48,8 +48,10 @@ func (e ParseError) Error() string { return fmt.Sprintf("text format parsing error in line %d: %s", e.Line, e.Msg) } -// TextParser is used to parse the simple and flat text-based exchange format. Its -// zero value is ready to use. +// TextParser is used to parse the simple and flat text-based exchange format. +// +// TextParser instances must be created with NewTextParser, the zero value of +// TextParser is invalid. 
type TextParser struct { metricFamiliesByName map[string]*dto.MetricFamily buf *bufio.Reader // Where the parsed input is read through. @@ -129,9 +131,44 @@ func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricF if p.err != nil && errors.Is(p.err, io.EOF) { p.parseError("unexpected end of input stream") } + for _, histogramMetric := range p.histograms { + normalizeHistogram(histogramMetric.GetHistogram()) + } return p.metricFamiliesByName, p.err } +// normalizeHistogram makes sure that all the buckets and the count in each +// histogram is either completely float or completely integer. +func normalizeHistogram(histogram *dto.Histogram) { + if histogram == nil { + return + } + anyFloats := false + if histogram.GetSampleCountFloat() != 0 { + anyFloats = true + } else { + for _, b := range histogram.GetBucket() { + if b.GetCumulativeCountFloat() != 0 { + anyFloats = true + break + } + } + } + if !anyFloats { + return + } + if histogram.GetSampleCountFloat() == 0 { + histogram.SampleCountFloat = proto.Float64(float64(histogram.GetSampleCount())) + histogram.SampleCount = nil + } + for _, b := range histogram.GetBucket() { + if b.GetCumulativeCountFloat() == 0 { + b.CumulativeCountFloat = proto.Float64(float64(b.GetCumulativeCount())) + b.CumulativeCount = nil + } + } +} + func (p *TextParser) reset(in io.Reader) { p.metricFamiliesByName = map[string]*dto.MetricFamily{} p.currentLabelPairs = nil @@ -281,7 +318,9 @@ func (p *TextParser) readingLabels() stateFn { // Summaries/histograms are special. We have to reset the // currentLabels map, currentQuantile and currentBucket before starting to // read labels. 
- if p.currentMF.GetType() == dto.MetricType_SUMMARY || p.currentMF.GetType() == dto.MetricType_HISTOGRAM { + if p.currentMF.GetType() == dto.MetricType_SUMMARY || + p.currentMF.GetType() == dto.MetricType_HISTOGRAM || + p.currentMF.GetType() == dto.MetricType_GAUGE_HISTOGRAM { p.currentLabels = map[string]string{} p.currentLabels[string(model.MetricNameLabel)] = p.currentMF.GetName() p.currentQuantile = math.NaN() @@ -374,7 +413,9 @@ func (p *TextParser) startLabelName() stateFn { // Special summary/histogram treatment. Don't add 'quantile' and 'le' // labels to 'real' labels. if (p.currentMF.GetType() != dto.MetricType_SUMMARY || p.currentLabelPair.GetName() != model.QuantileLabel) && - (p.currentMF.GetType() != dto.MetricType_HISTOGRAM || p.currentLabelPair.GetName() != model.BucketLabel) { + ((p.currentMF.GetType() != dto.MetricType_HISTOGRAM && + p.currentMF.GetType() != dto.MetricType_GAUGE_HISTOGRAM) || + p.currentLabelPair.GetName() != model.BucketLabel) { p.currentLabelPairs = append(p.currentLabelPairs, p.currentLabelPair) } // Check for duplicate label names. @@ -425,7 +466,7 @@ func (p *TextParser) startLabelValue() stateFn { } } // Similar special treatment of histograms. - if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { + if p.currentMF.GetType() == dto.MetricType_HISTOGRAM || p.currentMF.GetType() == dto.MetricType_GAUGE_HISTOGRAM { if p.currentLabelPair.GetName() == model.BucketLabel { if p.currentBucket, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil { // Create a more helpful error message. 
@@ -476,7 +517,7 @@ func (p *TextParser) readingValue() stateFn { p.summaries[signature] = p.currentMetric p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) } - case dto.MetricType_HISTOGRAM: + case dto.MetricType_HISTOGRAM, dto.MetricType_GAUGE_HISTOGRAM: signature := model.LabelsToSignature(p.currentLabels) if histogram := p.histograms[signature]; histogram != nil { p.currentMetric = histogram @@ -522,24 +563,38 @@ func (p *TextParser) readingValue() stateFn { }, ) } - case dto.MetricType_HISTOGRAM: + case dto.MetricType_HISTOGRAM, dto.MetricType_GAUGE_HISTOGRAM: // *sigh* if p.currentMetric.Histogram == nil { p.currentMetric.Histogram = &dto.Histogram{} } switch { case p.currentIsHistogramCount: - p.currentMetric.Histogram.SampleCount = proto.Uint64(uint64(value)) + if uintValue := uint64(value); value == float64(uintValue) { + p.currentMetric.Histogram.SampleCount = proto.Uint64(uintValue) + } else { + if value < 0 { + p.parseError(fmt.Sprintf("negative count for histogram %q", p.currentMF.GetName())) + return nil + } + p.currentMetric.Histogram.SampleCountFloat = proto.Float64(value) + } case p.currentIsHistogramSum: p.currentMetric.Histogram.SampleSum = proto.Float64(value) case !math.IsNaN(p.currentBucket): - p.currentMetric.Histogram.Bucket = append( - p.currentMetric.Histogram.Bucket, - &dto.Bucket{ - UpperBound: proto.Float64(p.currentBucket), - CumulativeCount: proto.Uint64(uint64(value)), - }, - ) + b := &dto.Bucket{ + UpperBound: proto.Float64(p.currentBucket), + } + if uintValue := uint64(value); value == float64(uintValue) { + b.CumulativeCount = proto.Uint64(uintValue) + } else { + if value < 0 { + p.parseError(fmt.Sprintf("negative bucket population for histogram %q", p.currentMF.GetName())) + return nil + } + b.CumulativeCountFloat = proto.Float64(value) + } + p.currentMetric.Histogram.Bucket = append(p.currentMetric.Histogram.Bucket, b) } default: p.err = fmt.Errorf("unexpected type for metric name %q", p.currentMF.GetName()) @@ 
-602,10 +657,18 @@ func (p *TextParser) readingType() stateFn { if p.readTokenUntilNewline(false); p.err != nil { return nil // Unexpected end of input. } - metricType, ok := dto.MetricType_value[strings.ToUpper(p.currentToken.String())] + typ := strings.ToUpper(p.currentToken.String()) // Tolerate any combination of upper and lower case. + metricType, ok := dto.MetricType_value[typ] // Tolerate "gauge_histogram" (not originally part of the text format). if !ok { - p.parseError(fmt.Sprintf("unknown metric type %q", p.currentToken.String())) - return nil + // We also want to tolerate "gaugehistogram" to mark a gauge + // histogram, because that string is used in OpenMetrics. Note, + // however, that gauge histograms do not officially exist in the + // classic text format. + if typ != "GAUGEHISTOGRAM" { + p.parseError(fmt.Sprintf("unknown metric type %q", p.currentToken.String())) + return nil + } + metricType = int32(dto.MetricType_GAUGE_HISTOGRAM) } p.currentMF.Type = dto.MetricType(metricType).Enum() return p.startOfLine @@ -855,7 +918,8 @@ func (p *TextParser) setOrCreateCurrentMF() { } histogramName := histogramMetricName(name) if p.currentMF = p.metricFamiliesByName[histogramName]; p.currentMF != nil { - if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { + if p.currentMF.GetType() == dto.MetricType_HISTOGRAM || + p.currentMF.GetType() == dto.MetricType_GAUGE_HISTOGRAM { if isCount(name) { p.currentIsHistogramCount = true } diff --git a/vendor/github.com/shamaton/msgpack/v2/README.md b/vendor/github.com/shamaton/msgpack/v2/README.md index 97f5b7d85d7..93e3a5c85e3 100644 --- a/vendor/github.com/shamaton/msgpack/v2/README.md +++ b/vendor/github.com/shamaton/msgpack/v2/README.md @@ -6,6 +6,11 @@ [![codecov](https://codecov.io/gh/shamaton/msgpack/branch/master/graph/badge.svg?token=9PD2JUK5V3)](https://codecov.io/gh/shamaton/msgpack) [![FOSSA 
Status](https://app.fossa.com/api/projects/git%2Bgithub.com%2Fshamaton%2Fmsgpack.svg?type=shield)](https://app.fossa.com/projects/git%2Bgithub.com%2Fshamaton%2Fmsgpack?ref=badge_shield) +## 📣 Announcement: `time.Time` decoding defaults to **UTC** in v3 +Starting with **v3.0.0**, when decoding MessagePack **Timestamp** into Go’s `time.Time`, +the default `Location` will be **UTC** (previously `Local`). The instant is unchanged. +To keep the old behavior, use `SetDecodedTimeAsLocal()`. + ## Features * Supported types : primitive / array / slice / struct / map / interface{} and time.Time * Renaming fields via `msgpack:"field_name"` @@ -60,6 +65,51 @@ func handle(w http.ResponseWriter, r *http.Request) { } ``` +## 📣 Announcement: `time.Time` decoding defaults to **UTC** in v3 + +**TL;DR:** Starting with **v3.0.0**, when decoding MessagePack **Timestamp** into Go’s `time.Time`, the default `Location` will be **UTC** (previously `Local`). The **instant** is unchanged—only the display/location changes. This avoids host-dependent differences and aligns with common distributed systems practice. + +### What is changing? + +* **Before (v2.x):** Decoded `time.Time` defaults to `Local`. +* **After (v3.0.0):** Decoded `time.Time` defaults to **UTC**. + +MessagePack’s Timestamp encodes an **instant** (epoch seconds + nanoseconds) and does **not** carry timezone info. Your data’s point in time is the same; only `time.Time.Location()` differs. + +### Why? + +* Eliminate environment-dependent behavior (e.g., different hosts showing different local zones). +* Make “UTC by default” the safe, predictable baseline for logs, APIs, and distributed apps. + +### Who is affected? + +* Apps that **display local time** without explicitly converting from UTC. +* If your code already normalizes to UTC or explicitly sets a location, you’re likely unaffected. 
+ +### Keep the old behavior (Local) + +If you want the v2 behavior on v3: + +```go +msgpack.SetDecodedTimeAsLocal() +``` + +Or convert after the fact: + +```go +var t time.Time +_ = msgpack.Unmarshal(data, &t) +t = t.In(time.Local) +``` + +### Preview the new behavior on v2 (optional) + +You can opt into UTC today on v2.x: + +```go +msgpack.SetDecodedTimeAsUTC() +``` + ## Benchmark This result made from [shamaton/msgpack_bench](https://github.com/shamaton/msgpack_bench) diff --git a/vendor/github.com/shamaton/msgpack/v2/msgpack.go b/vendor/github.com/shamaton/msgpack/v2/msgpack.go index 10d125e787c..5c8d5d7a235 100644 --- a/vendor/github.com/shamaton/msgpack/v2/msgpack.go +++ b/vendor/github.com/shamaton/msgpack/v2/msgpack.go @@ -10,6 +10,7 @@ import ( "github.com/shamaton/msgpack/v2/internal/encoding" streamdecoding "github.com/shamaton/msgpack/v2/internal/stream/decoding" streamencoding "github.com/shamaton/msgpack/v2/internal/stream/encoding" + "github.com/shamaton/msgpack/v2/time" ) // StructAsArray is encoding option. @@ -82,3 +83,13 @@ func RemoveExtStreamCoder(e ext.StreamEncoder, d ext.StreamDecoder) error { func SetComplexTypeCode(code int8) { def.SetComplexTypeCode(code) } + +// SetDecodedTimeAsUTC sets decoded time.Time values to UTC timezone. +func SetDecodedTimeAsUTC() { + time.SetDecodedAsLocal(false) +} + +// SetDecodedTimeAsLocal sets decoded time.Time values to local timezone. 
+func SetDecodedTimeAsLocal() { + time.SetDecodedAsLocal(true) +} diff --git a/vendor/github.com/shamaton/msgpack/v2/time/decode.go b/vendor/github.com/shamaton/msgpack/v2/time/decode.go index 46876a92670..e5962d2bd69 100644 --- a/vendor/github.com/shamaton/msgpack/v2/time/decode.go +++ b/vendor/github.com/shamaton/msgpack/v2/time/decode.go @@ -48,7 +48,11 @@ func (td *timeDecoder) AsValue(offset int, k reflect.Kind, d *[]byte) (interface case def.Fixext4: _, offset = td.ReadSize1(offset, d) bs, offset := td.ReadSize4(offset, d) - return time.Unix(int64(binary.BigEndian.Uint32(bs)), 0), offset, nil + v := time.Unix(int64(binary.BigEndian.Uint32(bs)), 0) + if decodeAsLocal { + return v, offset, nil + } + return v.UTC(), offset, nil case def.Fixext8: _, offset = td.ReadSize1(offset, d) @@ -56,9 +60,13 @@ func (td *timeDecoder) AsValue(offset int, k reflect.Kind, d *[]byte) (interface data64 := binary.BigEndian.Uint64(bs) nano := int64(data64 >> 34) if nano > 999999999 { - return zero, 0, fmt.Errorf("In timestamp 64 formats, nanoseconds must not be larger than 999999999 : %d", nano) + return zero, 0, fmt.Errorf("in timestamp 64 formats, nanoseconds must not be larger than 999999999 : %d", nano) } - return time.Unix(int64(data64&0x00000003ffffffff), nano), offset, nil + v := time.Unix(int64(data64&0x00000003ffffffff), nano) + if decodeAsLocal { + return v, offset, nil + } + return v.UTC(), offset, nil case def.Ext8: _, offset = td.ReadSize1(offset, d) @@ -67,10 +75,14 @@ func (td *timeDecoder) AsValue(offset int, k reflect.Kind, d *[]byte) (interface secbs, offset := td.ReadSize8(offset, d) nano := binary.BigEndian.Uint32(nanobs) if nano > 999999999 { - return zero, 0, fmt.Errorf("In timestamp 96 formats, nanoseconds must not be larger than 999999999 : %d", nano) + return zero, 0, fmt.Errorf("in timestamp 96 formats, nanoseconds must not be larger than 999999999 : %d", nano) } sec := binary.BigEndian.Uint64(secbs) - return time.Unix(int64(sec), int64(nano)), offset, 
nil + v := time.Unix(int64(sec), int64(nano)) + if decodeAsLocal { + return v, offset, nil + } + return v.UTC(), offset, nil } return zero, 0, fmt.Errorf("should not reach this line!! code %x decoding %v", code, k) diff --git a/vendor/github.com/shamaton/msgpack/v2/time/decode_stream.go b/vendor/github.com/shamaton/msgpack/v2/time/decode_stream.go index 3dc945733b2..0de21c72b7b 100644 --- a/vendor/github.com/shamaton/msgpack/v2/time/decode_stream.go +++ b/vendor/github.com/shamaton/msgpack/v2/time/decode_stream.go @@ -33,7 +33,11 @@ func (td *timeStreamDecoder) IsType(code byte, innerType int8, dataLength int) b func (td *timeStreamDecoder) ToValue(code byte, data []byte, k reflect.Kind) (interface{}, error) { switch code { case def.Fixext4: - return time.Unix(int64(binary.BigEndian.Uint32(data)), 0), nil + v := time.Unix(int64(binary.BigEndian.Uint32(data)), 0) + if decodeAsLocal { + return v, nil + } + return v.UTC(), nil case def.Fixext8: data64 := binary.BigEndian.Uint64(data) @@ -41,7 +45,11 @@ func (td *timeStreamDecoder) ToValue(code byte, data []byte, k reflect.Kind) (in if nano > 999999999 { return zero, fmt.Errorf("in timestamp 64 formats, nanoseconds must not be larger than 999999999 : %d", nano) } - return time.Unix(int64(data64&0x00000003ffffffff), nano), nil + v := time.Unix(int64(data64&0x00000003ffffffff), nano) + if decodeAsLocal { + return v, nil + } + return v.UTC(), nil case def.Ext8: nano := binary.BigEndian.Uint32(data[:4]) @@ -49,7 +57,11 @@ func (td *timeStreamDecoder) ToValue(code byte, data []byte, k reflect.Kind) (in return zero, fmt.Errorf("in timestamp 96 formats, nanoseconds must not be larger than 999999999 : %d", nano) } sec := binary.BigEndian.Uint64(data[4:12]) - return time.Unix(int64(sec), int64(nano)), nil + v := time.Unix(int64(sec), int64(nano)) + if decodeAsLocal { + return v, nil + } + return v.UTC(), nil } return zero, fmt.Errorf("should not reach this line!! 
code %x decoding %v", code, k) diff --git a/vendor/github.com/shamaton/msgpack/v2/time/time.go b/vendor/github.com/shamaton/msgpack/v2/time/time.go new file mode 100644 index 00000000000..233bebfc5e8 --- /dev/null +++ b/vendor/github.com/shamaton/msgpack/v2/time/time.go @@ -0,0 +1,8 @@ +package time + +var decodeAsLocal = true + +// SetDecodedAsLocal sets the decoded time to local time. +func SetDecodedAsLocal(b bool) { + decodeAsLocal = b +} diff --git a/vendor/go.etcd.io/etcd/api/v3/version/version.go b/vendor/go.etcd.io/etcd/api/v3/version/version.go index 9e7bc64c17e..c0798b6854d 100644 --- a/vendor/go.etcd.io/etcd/api/v3/version/version.go +++ b/vendor/go.etcd.io/etcd/api/v3/version/version.go @@ -26,7 +26,7 @@ import ( var ( // MinClusterVersion is the min cluster version this etcd binary is compatible with. MinClusterVersion = "3.0.0" - Version = "3.6.5" + Version = "3.6.7" APIVersion = "unknown" // Git SHA Value will be set during build diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go index e854d7e84e8..2950fdb42ee 100644 --- a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go @@ -82,7 +82,7 @@ func marshalJSON(id []byte) ([]byte, error) { } // unmarshalJSON inflates trace id from hex string, possibly enclosed in quotes. 
-func unmarshalJSON(dst []byte, src []byte) error { +func unmarshalJSON(dst, src []byte) error { if l := len(src); l >= 2 && src[0] == '"' && src[l-1] == '"' { src = src[1 : l-1] } diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go index 29e629d6674..5bb3b16c704 100644 --- a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go @@ -41,7 +41,7 @@ func (i *protoInt64) UnmarshalJSON(data []byte) error { // strings or integers. type protoUint64 uint64 -// Int64 returns the protoUint64 as a uint64. +// Uint64 returns the protoUint64 as a uint64. func (i *protoUint64) Uint64() uint64 { return uint64(*i) } // UnmarshalJSON decodes both strings and integers. diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go index a13a6b733da..67f80b6aa07 100644 --- a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go @@ -10,6 +10,7 @@ import ( "errors" "fmt" "io" + "math" "time" ) @@ -151,8 +152,8 @@ func (s Span) MarshalJSON() ([]byte, error) { }{ Alias: Alias(s), ParentSpanID: parentSpanId, - StartTime: uint64(startT), - EndTime: uint64(endT), + StartTime: uint64(startT), // nolint:gosec // >0 checked above. + EndTime: uint64(endT), // nolint:gosec // >0 checked above. }) } @@ -201,11 +202,13 @@ func (s *Span) UnmarshalJSON(data []byte) error { case "startTimeUnixNano", "start_time_unix_nano": var val protoUint64 err = decoder.Decode(&val) - s.StartTime = time.Unix(0, int64(val.Uint64())) + v := int64(min(val.Uint64(), math.MaxInt64)) //nolint:gosec // Overflow checked. 
+ s.StartTime = time.Unix(0, v) case "endTimeUnixNano", "end_time_unix_nano": var val protoUint64 err = decoder.Decode(&val) - s.EndTime = time.Unix(0, int64(val.Uint64())) + v := int64(min(val.Uint64(), math.MaxInt64)) //nolint:gosec // Overflow checked. + s.EndTime = time.Unix(0, v) case "attributes": err = decoder.Decode(&s.Attrs) case "droppedAttributesCount", "dropped_attributes_count": @@ -248,13 +251,20 @@ func (s *Span) UnmarshalJSON(data []byte) error { type SpanFlags int32 const ( + // SpanFlagsTraceFlagsMask is a mask for trace-flags. + // // Bits 0-7 are used for trace flags. SpanFlagsTraceFlagsMask SpanFlags = 255 - // Bits 8 and 9 are used to indicate that the parent span or link span is remote. - // Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known. - // Bit 9 (`IS_REMOTE`) indicates whether the span or link is remote. + // SpanFlagsContextHasIsRemoteMask is a mask for HAS_IS_REMOTE status. + // + // Bits 8 and 9 are used to indicate that the parent span or link span is + // remote. Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known. SpanFlagsContextHasIsRemoteMask SpanFlags = 256 - // SpanFlagsContextHasIsRemoteMask indicates the Span is remote. + // SpanFlagsContextIsRemoteMask is a mask for IS_REMOTE status. + // + // Bits 8 and 9 are used to indicate that the parent span or link span is + // remote. Bit 9 (`IS_REMOTE`) indicates whether the span or link is + // remote. SpanFlagsContextIsRemoteMask SpanFlags = 512 ) @@ -263,26 +273,30 @@ const ( type SpanKind int32 const ( - // Indicates that the span represents an internal operation within an application, - // as opposed to an operation happening at the boundaries. Default value. + // SpanKindInternal indicates that the span represents an internal + // operation within an application, as opposed to an operation happening at + // the boundaries. 
SpanKindInternal SpanKind = 1 - // Indicates that the span covers server-side handling of an RPC or other - // remote network request. + // SpanKindServer indicates that the span covers server-side handling of an + // RPC or other remote network request. SpanKindServer SpanKind = 2 - // Indicates that the span describes a request to some remote service. + // SpanKindClient indicates that the span describes a request to some + // remote service. SpanKindClient SpanKind = 3 - // Indicates that the span describes a producer sending a message to a broker. - // Unlike CLIENT and SERVER, there is often no direct critical path latency relationship - // between producer and consumer spans. A PRODUCER span ends when the message was accepted - // by the broker while the logical processing of the message might span a much longer time. + // SpanKindProducer indicates that the span describes a producer sending a + // message to a broker. Unlike SpanKindClient and SpanKindServer, there is + // often no direct critical path latency relationship between producer and + // consumer spans. A SpanKindProducer span ends when the message was + // accepted by the broker while the logical processing of the message might + // span a much longer time. SpanKindProducer SpanKind = 4 - // Indicates that the span describes consumer receiving a message from a broker. - // Like the PRODUCER kind, there is often no direct critical path latency relationship - // between producer and consumer spans. + // SpanKindConsumer indicates that the span describes a consumer receiving + // a message from a broker. Like SpanKindProducer, there is often no direct + // critical path latency relationship between producer and consumer spans. SpanKindConsumer SpanKind = 5 ) -// Event is a time-stamped annotation of the span, consisting of user-supplied +// SpanEvent is a time-stamped annotation of the span, consisting of user-supplied // text description and key-value pairs. 
type SpanEvent struct { // time_unix_nano is the time the event occurred. @@ -312,7 +326,7 @@ func (e SpanEvent) MarshalJSON() ([]byte, error) { Time uint64 `json:"timeUnixNano,omitempty"` }{ Alias: Alias(e), - Time: uint64(t), + Time: uint64(t), //nolint:gosec // >0 checked above }) } @@ -347,7 +361,8 @@ func (se *SpanEvent) UnmarshalJSON(data []byte) error { case "timeUnixNano", "time_unix_nano": var val protoUint64 err = decoder.Decode(&val) - se.Time = time.Unix(0, int64(val.Uint64())) + v := int64(min(val.Uint64(), math.MaxInt64)) //nolint:gosec // Overflow checked. + se.Time = time.Unix(0, v) case "name": err = decoder.Decode(&se.Name) case "attributes": @@ -365,10 +380,11 @@ func (se *SpanEvent) UnmarshalJSON(data []byte) error { return nil } -// A pointer from the current span to another span in the same trace or in a -// different trace. For example, this can be used in batching operations, -// where a single batch handler processes multiple requests from different -// traces or when the handler receives a request from a different project. +// SpanLink is a reference from the current span to another span in the same +// trace or in a different trace. For example, this can be used in batching +// operations, where a single batch handler processes multiple requests from +// different traces or when the handler receives a request from a different +// project. type SpanLink struct { // A unique identifier of a trace that this linked span is part of. The ID is a // 16-byte array. diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go index 1217776ead1..a2802764f81 100644 --- a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go @@ -3,17 +3,19 @@ package telemetry +// StatusCode is the status of a Span. 
+// // For the semantics of status codes see // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status type StatusCode int32 const ( - // The default status. + // StatusCodeUnset is the default status. StatusCodeUnset StatusCode = 0 - // The Span has been validated by an Application developer or Operator to - // have completed successfully. + // StatusCodeOK is used when the Span has been validated by an Application + // developer or Operator to have completed successfully. StatusCodeOK StatusCode = 1 - // The Span contains an error. + // StatusCodeError is used when the Span contains an error. StatusCodeError StatusCode = 2 ) diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go index 69a348f0f06..44197b80849 100644 --- a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go @@ -71,7 +71,7 @@ func (td *Traces) UnmarshalJSON(data []byte) error { return nil } -// A collection of ScopeSpans from a Resource. +// ResourceSpans is a collection of ScopeSpans from a Resource. type ResourceSpans struct { // The resource for the spans in this message. // If this field is not set then no resource info is known. @@ -128,7 +128,7 @@ func (rs *ResourceSpans) UnmarshalJSON(data []byte) error { return nil } -// A collection of Spans produced by an InstrumentationScope. +// ScopeSpans is a collection of Spans produced by an InstrumentationScope. type ScopeSpans struct { // The instrumentation scope information for the spans in this message. 
// Semantically when InstrumentationScope isn't set, it is equivalent with diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go index 0dd01b063a3..022768bb501 100644 --- a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go @@ -1,8 +1,6 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -//go:generate stringer -type=ValueKind -trimprefix=ValueKind - package telemetry import ( @@ -23,7 +21,7 @@ import ( // A zero value is valid and represents an empty value. type Value struct { // Ensure forward compatibility by explicitly making this not comparable. - noCmp [0]func() //nolint: unused // This is indeed used. + noCmp [0]func() //nolint:unused // This is indeed used. // num holds the value for Int64, Float64, and Bool. It holds the length // for String, Bytes, Slice, Map. @@ -92,7 +90,7 @@ func IntValue(v int) Value { return Int64Value(int64(v)) } // Int64Value returns a [Value] for an int64. func Int64Value(v int64) Value { - return Value{num: uint64(v), any: ValueKindInt64} + return Value{num: uint64(v), any: ValueKindInt64} //nolint:gosec // Raw value conv. } // Float64Value returns a [Value] for a float64. @@ -164,7 +162,7 @@ func (v Value) AsInt64() int64 { // this will return garbage. func (v Value) asInt64() int64 { // Assumes v.num was a valid int64 (overflow not checked). - return int64(v.num) // nolint: gosec + return int64(v.num) //nolint:gosec // Bounded. } // AsBool returns the value held by v as a bool. @@ -309,13 +307,13 @@ func (v Value) String() string { return v.asString() case ValueKindInt64: // Assumes v.num was a valid int64 (overflow not checked). - return strconv.FormatInt(int64(v.num), 10) // nolint: gosec + return strconv.FormatInt(int64(v.num), 10) //nolint:gosec // Bounded. 
case ValueKindFloat64: return strconv.FormatFloat(v.asFloat64(), 'g', -1, 64) case ValueKindBool: return strconv.FormatBool(v.asBool()) case ValueKindBytes: - return fmt.Sprint(v.asBytes()) + return string(v.asBytes()) case ValueKindMap: return fmt.Sprint(v.asMap()) case ValueKindSlice: @@ -343,7 +341,7 @@ func (v *Value) MarshalJSON() ([]byte, error) { case ValueKindInt64: return json.Marshal(struct { Value string `json:"intValue"` - }{strconv.FormatInt(int64(v.num), 10)}) + }{strconv.FormatInt(int64(v.num), 10)}) //nolint:gosec // Raw value conv. case ValueKindFloat64: return json.Marshal(struct { Value float64 `json:"doubleValue"` diff --git a/vendor/go.opentelemetry.io/auto/sdk/span.go b/vendor/go.opentelemetry.io/auto/sdk/span.go index 6ebea12a9e9..815d271ffb2 100644 --- a/vendor/go.opentelemetry.io/auto/sdk/span.go +++ b/vendor/go.opentelemetry.io/auto/sdk/span.go @@ -6,6 +6,7 @@ package sdk import ( "encoding/json" "fmt" + "math" "reflect" "runtime" "strings" @@ -16,7 +17,7 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" - semconv "go.opentelemetry.io/otel/semconv/v1.26.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace/noop" @@ -85,7 +86,12 @@ func (s *span) SetAttributes(attrs ...attribute.KeyValue) { limit := maxSpan.Attrs if limit == 0 { // No attributes allowed. - s.span.DroppedAttrs += uint32(len(attrs)) + n := int64(len(attrs)) + if n > 0 { + s.span.DroppedAttrs += uint32( //nolint:gosec // Bounds checked. + min(n, math.MaxUint32), + ) + } return } @@ -121,8 +127,13 @@ func (s *span) SetAttributes(attrs ...attribute.KeyValue) { // convCappedAttrs converts up to limit attrs into a []telemetry.Attr. The // number of dropped attributes is also returned. 
func convCappedAttrs(limit int, attrs []attribute.KeyValue) ([]telemetry.Attr, uint32) { + n := len(attrs) if limit == 0 { - return nil, uint32(len(attrs)) + var out uint32 + if n > 0 { + out = uint32(min(int64(n), math.MaxUint32)) //nolint:gosec // Bounds checked. + } + return nil, out } if limit < 0 { @@ -130,8 +141,12 @@ func convCappedAttrs(limit int, attrs []attribute.KeyValue) ([]telemetry.Attr, u return convAttrs(attrs), 0 } - limit = min(len(attrs), limit) - return convAttrs(attrs[:limit]), uint32(len(attrs) - limit) + if n < 0 { + n = 0 + } + + limit = min(n, limit) + return convAttrs(attrs[:limit]), uint32(n - limit) //nolint:gosec // Bounds checked. } func convAttrs(attrs []attribute.KeyValue) []telemetry.Attr { diff --git a/vendor/go.opentelemetry.io/auto/sdk/tracer.go b/vendor/go.opentelemetry.io/auto/sdk/tracer.go index cbcfabde3b1..e09acf022fa 100644 --- a/vendor/go.opentelemetry.io/auto/sdk/tracer.go +++ b/vendor/go.opentelemetry.io/auto/sdk/tracer.go @@ -5,6 +5,7 @@ package sdk import ( "context" + "math" "time" "go.opentelemetry.io/otel/trace" @@ -21,15 +22,20 @@ type tracer struct { var _ trace.Tracer = tracer{} -func (t tracer) Start(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) { - var psc trace.SpanContext +func (t tracer) Start( + ctx context.Context, + name string, + opts ...trace.SpanStartOption, +) (context.Context, trace.Span) { + var psc, sc trace.SpanContext sampled := true span := new(span) // Ask eBPF for sampling decision and span context info. - t.start(ctx, span, &psc, &sampled, &span.spanContext) + t.start(ctx, span, &psc, &sampled, &sc) span.sampled.Store(sampled) + span.spanContext = sc ctx = trace.ContextWithSpan(ctx, span) @@ -58,7 +64,13 @@ func (t *tracer) start( // start is used for testing. 
var start = func(context.Context, *span, *trace.SpanContext, *bool, *trace.SpanContext) {} -func (t tracer) traces(name string, cfg trace.SpanConfig, sc, psc trace.SpanContext) (*telemetry.Traces, *telemetry.Span) { +var intToUint32Bound = min(math.MaxInt, math.MaxUint32) + +func (t tracer) traces( + name string, + cfg trace.SpanConfig, + sc, psc trace.SpanContext, +) (*telemetry.Traces, *telemetry.Span) { span := &telemetry.Span{ TraceID: telemetry.TraceID(sc.TraceID()), SpanID: telemetry.SpanID(sc.SpanID()), @@ -73,11 +85,16 @@ func (t tracer) traces(name string, cfg trace.SpanConfig, sc, psc trace.SpanCont links := cfg.Links() if limit := maxSpan.Links; limit == 0 { - span.DroppedLinks = uint32(len(links)) + n := len(links) + if n > 0 { + bounded := max(min(n, intToUint32Bound), 0) + span.DroppedLinks = uint32(bounded) //nolint:gosec // Bounds checked. + } } else { if limit > 0 { n := max(len(links)-limit, 0) - span.DroppedLinks = uint32(n) + bounded := min(n, intToUint32Bound) + span.DroppedLinks = uint32(bounded) //nolint:gosec // Bounds checked. links = links[n:] } span.Links = convLinks(links) diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go index e65c4907c9b..2dc8eaea93a 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go @@ -52,6 +52,12 @@ type Option interface { apply(*config) } +type optionFunc func(*config) + +func (f optionFunc) apply(c *config) { + f(c) +} + // newConfig returns a config configured with all the passed Options. 
func newConfig(opts []Option) *config { c := &config{ @@ -65,27 +71,13 @@ func newConfig(opts []Option) *config { return c } -type publicEndpointOption struct{ p bool } - -func (o publicEndpointOption) apply(c *config) { - c.PublicEndpoint = o.p -} - // WithPublicEndpoint configures the Handler to link the span with an incoming // span context. If this option is not provided, then the association is a child // association instead of a link. func WithPublicEndpoint() Option { - return publicEndpointOption{p: true} -} - -type publicEndpointFnOption struct { - fn func(context.Context, *stats.RPCTagInfo) bool -} - -func (o publicEndpointFnOption) apply(c *config) { - if o.fn != nil { - c.PublicEndpointFn = o.fn - } + return optionFunc(func(c *config) { + c.PublicEndpoint = true + }) } // WithPublicEndpointFn runs with every request, and allows conditionally @@ -94,81 +86,55 @@ func (o publicEndpointFnOption) apply(c *config) { // child association instead of a link. // Note: WithPublicEndpoint takes precedence over WithPublicEndpointFn. func WithPublicEndpointFn(fn func(context.Context, *stats.RPCTagInfo) bool) Option { - return publicEndpointFnOption{fn: fn} -} - -type propagatorsOption struct{ p propagation.TextMapPropagator } - -func (o propagatorsOption) apply(c *config) { - if o.p != nil { - c.Propagators = o.p - } + return optionFunc(func(c *config) { + c.PublicEndpointFn = fn + }) } // WithPropagators returns an Option to use the Propagators when extracting // and injecting trace context from requests. func WithPropagators(p propagation.TextMapPropagator) Option { - return propagatorsOption{p: p} -} - -type tracerProviderOption struct{ tp trace.TracerProvider } - -func (o tracerProviderOption) apply(c *config) { - if o.tp != nil { - c.TracerProvider = o.tp - } + return optionFunc(func(c *config) { + if p != nil { + c.Propagators = p + } + }) } // WithInterceptorFilter returns an Option to use the request filter. // // Deprecated: Use stats handlers instead. 
func WithInterceptorFilter(f InterceptorFilter) Option { - return interceptorFilterOption{f: f} -} - -type interceptorFilterOption struct { - f InterceptorFilter -} - -func (o interceptorFilterOption) apply(c *config) { - if o.f != nil { - c.InterceptorFilter = o.f - } + return optionFunc(func(c *config) { + if f != nil { + c.InterceptorFilter = f + } + }) } // WithFilter returns an Option to use the request filter. func WithFilter(f Filter) Option { - return filterOption{f: f} -} - -type filterOption struct { - f Filter -} - -func (o filterOption) apply(c *config) { - if o.f != nil { - c.Filter = o.f - } + return optionFunc(func(c *config) { + if f != nil { + c.Filter = f + } + }) } // WithTracerProvider returns an Option to use the TracerProvider when // creating a Tracer. func WithTracerProvider(tp trace.TracerProvider) Option { - return tracerProviderOption{tp: tp} -} - -type meterProviderOption struct{ mp metric.MeterProvider } - -func (o meterProviderOption) apply(c *config) { - if o.mp != nil { - c.MeterProvider = o.mp - } + return optionFunc(func(c *config) { + c.TracerProvider = tp + }) } // WithMeterProvider returns an Option to use the MeterProvider when // creating a Meter. If this option is not provide the global MeterProvider will be used. func WithMeterProvider(mp metric.MeterProvider) Option { - return meterProviderOption{mp: mp} + return optionFunc(func(c *config) { + c.MeterProvider = mp + }) } // Event type that can be recorded, see WithMessageEvents. @@ -180,21 +146,6 @@ const ( SentEvents ) -type messageEventsProviderOption struct { - events []Event -} - -func (m messageEventsProviderOption) apply(c *config) { - for _, e := range m.events { - switch e { - case ReceivedEvents: - c.ReceivedEvent = true - case SentEvents: - c.SentEvent = true - } - } -} - // WithMessageEvents configures the Handler to record the specified events // (span.AddEvent) on spans. By default only summary attributes are added at the // end of the request. 
@@ -203,13 +154,16 @@ func (m messageEventsProviderOption) apply(c *config) { // - ReceivedEvents: Record the number of bytes read after every gRPC read operation. // - SentEvents: Record the number of bytes written after every gRPC write operation. func WithMessageEvents(events ...Event) Option { - return messageEventsProviderOption{events: events} -} - -type spanStartOption struct{ opts []trace.SpanStartOption } - -func (o spanStartOption) apply(c *config) { - c.SpanStartOptions = append(c.SpanStartOptions, o.opts...) + return optionFunc(func(c *config) { + for _, e := range events { + switch e { + case ReceivedEvents: + c.ReceivedEvent = true + case SentEvents: + c.SentEvent = true + } + } + }) } // WithSpanOptions configures an additional set of @@ -217,31 +171,25 @@ func (o spanStartOption) apply(c *config) { // // Deprecated: It is only used by the deprecated interceptor, and is unused by [NewClientHandler] and [NewServerHandler]. func WithSpanOptions(opts ...trace.SpanStartOption) Option { - return spanStartOption{opts} -} - -type spanAttributesOption struct{ a []attribute.KeyValue } - -func (o spanAttributesOption) apply(c *config) { - if o.a != nil { - c.SpanAttributes = o.a - } + return optionFunc(func(c *config) { + c.SpanStartOptions = append(c.SpanStartOptions, opts...) + }) } // WithSpanAttributes returns an Option to add custom attributes to the spans. func WithSpanAttributes(a ...attribute.KeyValue) Option { - return spanAttributesOption{a: a} -} - -type metricAttributesOption struct{ a []attribute.KeyValue } - -func (o metricAttributesOption) apply(c *config) { - if o.a != nil { - c.MetricAttributes = o.a - } + return optionFunc(func(c *config) { + if a != nil { + c.SpanAttributes = a + } + }) } // WithMetricAttributes returns an Option to add custom attributes to the metrics. 
func WithMetricAttributes(a ...attribute.KeyValue) Option { - return metricAttributesOption{a: a} + return optionFunc(func(c *config) { + if a != nil { + c.MetricAttributes = append(c.MetricAttributes, a...) + } + }) } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go index b427e172470..4c62341d664 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go @@ -6,9 +6,7 @@ package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.g import ( "context" - "go.opentelemetry.io/otel/baggage" "go.opentelemetry.io/otel/propagation" - "go.opentelemetry.io/otel/trace" "google.golang.org/grpc/metadata" ) @@ -17,9 +15,9 @@ type metadataSupplier struct { } // assert that metadataSupplier implements the TextMapCarrier interface. -var _ propagation.TextMapCarrier = &metadataSupplier{} +var _ propagation.TextMapCarrier = metadataSupplier{} -func (s *metadataSupplier) Get(key string) string { +func (s metadataSupplier) Get(key string) string { values := s.metadata.Get(key) if len(values) == 0 { return "" @@ -27,11 +25,11 @@ func (s *metadataSupplier) Get(key string) string { return values[0] } -func (s *metadataSupplier) Set(key, value string) { +func (s metadataSupplier) Set(key, value string) { s.metadata.Set(key, value) } -func (s *metadataSupplier) Keys() []string { +func (s metadataSupplier) Keys() []string { out := make([]string, 0, len(s.metadata)) for key := range s.metadata { out = append(out, key) @@ -39,50 +37,24 @@ func (s *metadataSupplier) Keys() []string { return out } -// Inject injects correlation context and span context into the gRPC -// metadata object. 
This function is meant to be used on outgoing -// requests. -// -// Deprecated: Unnecessary public func. -func Inject(ctx context.Context, md *metadata.MD, opts ...Option) { - c := newConfig(opts) - c.Propagators.Inject(ctx, &metadataSupplier{ - metadata: *md, - }) -} - func inject(ctx context.Context, propagators propagation.TextMapPropagator) context.Context { md, ok := metadata.FromOutgoingContext(ctx) if !ok { md = metadata.MD{} } - propagators.Inject(ctx, &metadataSupplier{ + propagators.Inject(ctx, metadataSupplier{ metadata: md, }) return metadata.NewOutgoingContext(ctx, md) } -// Extract returns the correlation context and span context that -// another service encoded in the gRPC metadata object with Inject. -// This function is meant to be used on incoming requests. -// -// Deprecated: Unnecessary public func. -func Extract(ctx context.Context, md *metadata.MD, opts ...Option) (baggage.Baggage, trace.SpanContext) { - c := newConfig(opts) - ctx = c.Propagators.Extract(ctx, &metadataSupplier{ - metadata: *md, - }) - - return baggage.FromContext(ctx), trace.SpanContextFromContext(ctx) -} - func extract(ctx context.Context, propagators propagation.TextMapPropagator) context.Context { md, ok := metadata.FromIncomingContext(ctx) if !ok { md = metadata.MD{} } - return propagators.Extract(ctx, &metadataSupplier{ + return propagators.Extract(ctx, metadataSupplier{ metadata: md, }) } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go index 29d7ab2bdac..278f6d0d99e 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go @@ -26,10 +26,11 @@ import ( type gRPCContextKey struct{} type gRPCContext struct { - inMessages int64 - outMessages int64 - 
metricAttrs []attribute.KeyValue - record bool + inMessages int64 + outMessages int64 + metricAttrs []attribute.KeyValue + metricAttrSet attribute.Set + record bool } type serverHandler struct { @@ -38,8 +39,8 @@ type serverHandler struct { tracer trace.Tracer duration rpcconv.ServerDuration - inSize rpcconv.ServerRequestSize - outSize rpcconv.ServerResponseSize + inSize int64Hist + outSize int64Hist inMsg rpcconv.ServerRequestsPerRPC outMsg rpcconv.ServerResponsesPerRPC } @@ -111,9 +112,12 @@ func (h *serverHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) cont } if record { + // Make a new slice to avoid aliasing into the same attrs slice used by metrics. + spanAttributes := make([]attribute.KeyValue, 0, len(attrs)+len(h.SpanAttributes)) + spanAttributes = append(append(spanAttributes, attrs...), h.SpanAttributes...) opts := []trace.SpanStartOption{ trace.WithSpanKind(trace.SpanKindServer), - trace.WithAttributes(append(attrs, h.SpanAttributes...)...), + trace.WithAttributes(spanAttributes...), } if h.PublicEndpoint || (h.PublicEndpointFn != nil && h.PublicEndpointFn(ctx, info)) { opts = append(opts, trace.WithNewRoot()) @@ -133,6 +137,7 @@ func (h *serverHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) cont metricAttrs: append(attrs, h.MetricAttributes...), record: record, } + gctx.metricAttrSet = attribute.NewSet(gctx.metricAttrs...) return context.WithValue(ctx, gRPCContextKey{}, &gctx) } @@ -157,8 +162,8 @@ type clientHandler struct { tracer trace.Tracer duration rpcconv.ClientDuration - inSize rpcconv.ClientResponseSize - outSize rpcconv.ClientRequestSize + inSize int64Hist + outSize int64Hist inMsg rpcconv.ClientResponsesPerRPC outMsg rpcconv.ClientRequestsPerRPC } @@ -219,11 +224,14 @@ func (h *clientHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) cont } if record { + // Make a new slice to avoid aliasing into the same attrs slice used by metrics. 
+ spanAttributes := make([]attribute.KeyValue, 0, len(attrs)+len(h.SpanAttributes)) + spanAttributes = append(append(spanAttributes, attrs...), h.SpanAttributes...) ctx, _ = h.tracer.Start( ctx, name, trace.WithSpanKind(trace.SpanKindClient), - trace.WithAttributes(append(attrs, h.SpanAttributes...)...), + trace.WithAttributes(spanAttributes...), ) } @@ -231,6 +239,7 @@ func (h *clientHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) cont metricAttrs: append(attrs, h.MetricAttributes...), record: record, } + gctx.metricAttrSet = attribute.NewSet(gctx.metricAttrs...) return inject(context.WithValue(ctx, gRPCContextKey{}, &gctx), h.Propagators) } @@ -262,7 +271,7 @@ func (*clientHandler) HandleConn(context.Context, stats.ConnStats) { } type int64Hist interface { - Record(context.Context, int64, ...attribute.KeyValue) + RecordSet(context.Context, int64, attribute.Set) } func (c *config) handleRPC( @@ -286,7 +295,7 @@ func (c *config) handleRPC( case *stats.InPayload: if gctx != nil { messageId = atomic.AddInt64(&gctx.inMessages, 1) - inSize.Record(ctx, int64(rs.Length), gctx.metricAttrs...) + inSize.RecordSet(ctx, int64(rs.Length), gctx.metricAttrSet) } if c.ReceivedEvent && span.IsRecording() { @@ -302,7 +311,7 @@ func (c *config) handleRPC( case *stats.OutPayload: if gctx != nil { messageId = atomic.AddInt64(&gctx.outMessages, 1) - outSize.Record(ctx, int64(rs.Length), gctx.metricAttrs...) + outSize.RecordSet(ctx, int64(rs.Length), gctx.metricAttrSet) } if c.SentEvent && span.IsRecording() { @@ -343,6 +352,9 @@ func (c *config) handleRPC( var metricAttrs []attribute.KeyValue if gctx != nil { + // Don't use gctx.metricAttrSet here, because it requires passing + // multiple RecordOptions, which would call metric.mergeSets and + // allocate a new set for each Record call. metricAttrs = make([]attribute.KeyValue, 0, len(gctx.metricAttrs)+1) metricAttrs = append(metricAttrs, gctx.metricAttrs...) 
} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go index aa4f4e2129c..98f148be5dd 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go @@ -5,6 +5,6 @@ package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.g // Version is the current release version of the gRPC instrumentation. func Version() string { - return "0.63.0" + return "0.64.0" // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/otel/.codespellignore b/vendor/go.opentelemetry.io/otel/.codespellignore index 2b53a25e1e1..a6d0cbcc9e8 100644 --- a/vendor/go.opentelemetry.io/otel/.codespellignore +++ b/vendor/go.opentelemetry.io/otel/.codespellignore @@ -8,3 +8,4 @@ nam valu thirdparty addOpt +observ diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml index b01762ffcc7..1b1b2aff9a4 100644 --- a/vendor/go.opentelemetry.io/otel/.golangci.yml +++ b/vendor/go.opentelemetry.io/otel/.golangci.yml @@ -197,6 +197,9 @@ linters: - float-compare - go-require - require-error + usetesting: + context-background: true + context-todo: true exclusions: generated: lax presets: diff --git a/vendor/go.opentelemetry.io/otel/.lycheeignore b/vendor/go.opentelemetry.io/otel/.lycheeignore index 5328505888d..994b677df7f 100644 --- a/vendor/go.opentelemetry.io/otel/.lycheeignore +++ b/vendor/go.opentelemetry.io/otel/.lycheeignore @@ -1,4 +1,5 @@ http://localhost +https://localhost http://jaeger-collector https://github.com/open-telemetry/opentelemetry-go/milestone/ https://github.com/open-telemetry/opentelemetry-go/projects @@ -6,4 +7,7 @@ https://github.com/open-telemetry/opentelemetry-go/projects 
https?:\/\/github\.com\/open-telemetry\/semantic-conventions\/archive\/refs\/tags\/[^.]+\.zip\[[^]]+] file:///home/runner/work/opentelemetry-go/opentelemetry-go/libraries file:///home/runner/work/opentelemetry-go/opentelemetry-go/manual -http://4.3.2.1:78/user/123 \ No newline at end of file +http://4.3.2.1:78/user/123 +file:///home/runner/work/opentelemetry-go/opentelemetry-go/exporters/otlp/otlptrace/otlptracegrpc/internal/observ/dns:/:4317 +# URL works, but it has blocked link checkers. +https://dl.acm.org/doi/10.1145/198429.198435 diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md index f3abcfdc2e3..ecbe0582c48 100644 --- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md +++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md @@ -11,6 +11,74 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm +## [1.39.0/0.61.0/0.15.0/0.0.14] 2025-12-05 + +### Added + +- Greatly reduce the cost of recording metrics in `go.opentelemetry.io/otel/sdk/metric` using hashing for map keys. (#7175) +- Add `WithInstrumentationAttributeSet` option to `go.opentelemetry.io/otel/log`, `go.opentelemetry.io/otel/metric`, and `go.opentelemetry.io/otel/trace` packages. + This provides a concurrent-safe and performant alternative to `WithInstrumentationAttributes` by accepting a pre-constructed `attribute.Set`. (#7287) +- Add experimental observability for the Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus`. + Check the `go.opentelemetry.io/otel/exporters/prometheus/internal/x` package documentation for more information. (#7345) +- Add experimental observability metrics in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. (#7353) +- Add temporality selector functions `DeltaTemporalitySelector`, `CumulativeTemporalitySelector`, `LowMemoryTemporalitySelector` to `go.opentelemetry.io/otel/sdk/metric`. 
(#7434) +- Add experimental observability metrics for simple log processor in `go.opentelemetry.io/otel/sdk/log`. (#7548) +- Add experimental observability metrics in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#7459) +- Add experimental observability metrics in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#7486) +- Add experimental observability metrics for simple span processor in `go.opentelemetry.io/otel/sdk/trace`. (#7374) +- Add experimental observability metrics in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#7512) +- Add experimental observability metrics for manual reader in `go.opentelemetry.io/otel/sdk/metric`. (#7524) +- Add experimental observability metrics for periodic reader in `go.opentelemetry.io/otel/sdk/metric`. (#7571) +- Support `OTEL_EXPORTER_OTLP_LOGS_INSECURE` and `OTEL_EXPORTER_OTLP_INSECURE` environmental variables in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#7608) +- Add `Enabled` method to the `Processor` interface in `go.opentelemetry.io/otel/sdk/log`. + All `Processor` implementations now include an `Enabled` method. (#7639) +- The `go.opentelemetry.io/otel/semconv/v1.38.0` package. + The package contains semantic conventions from the `v1.38.0` version of the OpenTelemetry Semantic Conventions. + See the [migration documentation](./semconv/v1.38.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.37.0.`(#7648) + +### Changed + +- `Distinct` in `go.opentelemetry.io/otel/attribute` is no longer guaranteed to uniquely identify an attribute set. + Collisions between `Distinct` values for different Sets are possible with extremely high cardinality (billions of series per instrument), but are highly unlikely. (#7175) +- `WithInstrumentationAttributes` in `go.opentelemetry.io/otel/trace` synchronously de-duplicates the passed attributes instead of delegating it to the returned `TracerOption`. 
(#7266) +- `WithInstrumentationAttributes` in `go.opentelemetry.io/otel/meter` synchronously de-duplicates the passed attributes instead of delegating it to the returned `MeterOption`. (#7266) +- `WithInstrumentationAttributes` in `go.opentelemetry.io/otel/log` synchronously de-duplicates the passed attributes instead of delegating it to the returned `LoggerOption`. (#7266) +- Rename the `OTEL_GO_X_SELF_OBSERVABILITY` environment variable to `OTEL_GO_X_OBSERVABILITY` in `go.opentelemetry.io/otel/sdk/trace`, `go.opentelemetry.io/otel/sdk/log`, and `go.opentelemetry.io/otel/exporters/stdout/stdouttrace`. (#7302) +- Improve performance of histogram `Record` in `go.opentelemetry.io/otel/sdk/metric` when min and max are disabled using `NoMinMax`. (#7306) +- Improve error handling for dropped data during translation by using `prometheus.NewInvalidMetric` in `go.opentelemetry.io/otel/exporters/prometheus`. + ⚠️ **Breaking Change:** Previously, these cases were only logged and scrapes succeeded. + Now, when translation would drop data (e.g., invalid label/value), the exporter emits a `NewInvalidMetric`, and Prometheus scrapes **fail with HTTP 500** by default. + To preserve the prior behavior (scrapes succeed while errors are logged), configure your Prometheus HTTP handler with: `promhttp.HandlerOpts{ ErrorHandling: promhttp.ContinueOnError }`. (#7363) +- Replace fnv hash with xxhash in `go.opentelemetry.io/otel/attribute` for better performance. (#7371) +- The default `TranslationStrategy` in `go.opentelemetry.io/exporters/prometheus` is changed from `otlptranslator.NoUTF8EscapingWithSuffixes` to `otlptranslator.UnderscoreEscapingWithSuffixes`. (#7421) +- Improve performance of concurrent measurements in `go.opentelemetry.io/otel/sdk/metric`. (#7427) +- Include W3C TraceFlags (bits 0–7) in the OTLP `Span.Flags` field in `go.opentelemetry.io/exporters/otlp/otlptrace/otlptracehttp` and `go.opentelemetry.io/exporters/otlp/otlptrace/otlptracegrpc`. 
(#7438) +- The `ErrorType` function in `go.opentelemetry.io/otel/semconv/v1.37.0` now handles custom error types. + If an error implements an `ErrorType() string` method, the return value of that method will be used as the error type. (#7442) + +### Fixed + +- Fix `WithInstrumentationAttributes` options in `go.opentelemetry.io/otel/trace`, `go.opentelemetry.io/otel/metric`, and `go.opentelemetry.io/otel/log` to properly merge attributes when passed multiple times instead of replacing them. + Attributes with duplicate keys will use the last value passed. (#7300) +- The equality of `attribute.Set` when using the `Equal` method is not affected by the user overriding the empty set pointed to by `attribute.EmptySet` in `go.opentelemetry.io/otel/attribute`. (#7357) +- Return partial OTLP export errors to the caller in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. (#7372) +- Return partial OTLP export errors to the caller in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#7372) +- Return partial OTLP export errors to the caller in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#7372) +- Return partial OTLP export errors to the caller in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#7372) +- Return partial OTLP export errors to the caller in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#7372) +- Return partial OTLP export errors to the caller in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#7372) +- Fix `AddAttributes`, `SetAttributes`, `SetBody` on `Record` in `go.opentelemetry.io/otel/sdk/log` to not mutate input. (#7403) +- Do not double record measurements of `RecordSet` methods in `go.opentelemetry.io/otel/semconv/v1.37.0`. (#7655) +- Do not double record measurements of `RecordSet` methods in `go.opentelemetry.io/otel/semconv/v1.36.0`. (#7656) + +### Removed + +- Drop support for [Go 1.23]. 
(#7274) +- Remove the `FilterProcessor` interface in `go.opentelemetry.io/otel/sdk/log`. + The `Enabled` method has been added to the `Processor` interface instead. + All `Processor` implementations must now implement the `Enabled` method. + Custom processors that do not filter records can implement `Enabled` to return `true`. (#7639) + ## [1.38.0/0.60.0/0.14.0/0.0.13] 2025-08-29 This release is the last to support [Go 1.23]. @@ -3430,8 +3498,10 @@ It contains api and sdk for trace and meter. - CircleCI build CI manifest files. - CODEOWNERS file to track owners of this project. -[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.38.0...HEAD +[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.39.0...HEAD +[1.39.0/0.61.0/0.15.0/0.0.14]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.39.0 [1.38.0/0.60.0/0.14.0/0.0.13]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.38.0 +[0.59.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/exporters/prometheus/v0.59.1 [1.37.0/0.59.0/0.13.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.37.0 [0.12.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/log/v0.12.2 [0.12.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/log/v0.12.1 diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md index 0b3ae855c19..ff5e1f76ecd 100644 --- a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md +++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md @@ -54,8 +54,8 @@ go get -d go.opentelemetry.io/otel (This may print some warning about "build constraints exclude all Go files", just ignore it.) -This will put the project in `${GOPATH}/src/go.opentelemetry.io/otel`. You -can alternatively use `git` directly with: +This will put the project in `${GOPATH}/src/go.opentelemetry.io/otel`. 
+Alternatively, you can use `git` directly with: ```sh git clone https://github.com/open-telemetry/opentelemetry-go @@ -65,8 +65,7 @@ git clone https://github.com/open-telemetry/opentelemetry-go that name is a kind of a redirector to GitHub that `go get` can understand, but `git` does not.) -This would put the project in the `opentelemetry-go` directory in -current working directory. +This will add the project as `opentelemetry-go` within the current directory. Enter the newly created directory and add your fork as a new remote: @@ -109,7 +108,7 @@ A PR is considered **ready to merge** when: This is not enforced through automation, but needs to be validated by the maintainer merging. - * At least one of the qualified approvals need to be from an + * At least one of the qualified approvals needs to be from an [Approver]/[Maintainer] affiliated with a different company than the author of the PR. * PRs introducing changes that have already been discussed and consensus @@ -166,11 +165,11 @@ guidelines](https://opentelemetry.io/docs/specs/otel/library-guidelines). ### Focus on Capabilities, Not Structure Compliance OpenTelemetry is an evolving specification, one where the desires and -use cases are clear, but the method to satisfy those uses cases are +use cases are clear, but the methods to satisfy those use cases are not. As such, Contributions should provide functionality and behavior that -conforms to the specification, but the interface and structure is +conforms to the specification, but the interface and structure are flexible. It is preferable to have contributions follow the idioms of the @@ -217,7 +216,7 @@ about dependency compatibility. This project does not partition dependencies based on the environment (i.e. `development`, `staging`, `production`). -Only the dependencies explicitly included in the released modules have be +Only the dependencies explicitly included in the released modules have been tested and verified to work with the released code. 
No other guarantee is made about the compatibility of other dependencies. @@ -635,8 +634,8 @@ is not in their root name. The use of internal packages should be scoped to a single module. A sub-module should never import from a parent internal package. This creates a coupling -between the two modules where a user can upgrade the parent without the child -and if the internal package API has changed it will fail to upgrade[^3]. +between the two modules where a user can upgrade the parent without the child, +and if the internal package API has changed, it will fail to upgrade[^3]. There are two known exceptions to this rule: @@ -657,7 +656,7 @@ this. ### Ignoring context cancellation -OpenTelemetry API implementations need to ignore the cancellation of the context that are +OpenTelemetry API implementations need to ignore the cancellation of the context that is passed when recording a value (e.g. starting a span, recording a measurement, emitting a log). Recording methods should not return an error describing the cancellation state of the context when they complete, nor should they abort any work. @@ -675,6 +674,441 @@ force flushing telemetry, shutting down a signal provider) the context cancellat should be honored. This means all work done on behalf of the user provided context should be canceled. +### Observability + +OpenTelemetry Go SDK components should be instrumented to enable users observability for the health and performance of the telemetry pipeline itself. +This allows operators to understand how well their observability infrastructure is functioning and to identify potential issues before they impact their applications. + +This section outlines the best practices for building instrumentation in OpenTelemetry Go SDK components. + +#### Environment Variable Activation + +Observability features are currently experimental. +They should be disabled by default and activated through the `OTEL_GO_X_OBSERVABILITY` environment variable. 
+This follows the established experimental feature pattern used throughout the SDK. + +Components should check for this environment variable using a consistent pattern: + +```go +import "go.opentelemetry.io/otel/*/internal/x" + +if x.Observability.Enabled() { + // Initialize observability metrics +} +``` + +**References**: + +- [stdouttrace exporter](./exporters/stdout/stdouttrace/internal/x/x.go) +- [sdk](./sdk/internal/x/x.go) + +#### Encapsulation + +Instrumentation should be encapsulated within a dedicated `struct` (e.g. `instrumentation`). +It should not be mixed into the instrumented component. + +Prefer this: + +```go +type SDKComponent struct { + inst *instrumentation +} + +type instrumentation struct { + inflight otelconv.SDKComponentInflight + exported otelconv.SDKComponentExported +} +``` + +To this: + +```go +// ❌ Avoid this pattern. +type SDKComponent struct { + /* other SDKComponent fields... */ + + inflight otelconv.SDKComponentInflight + exported otelconv.SDKComponentExported +} +``` + +The instrumentation code should not bloat the code being instrumented. +Likely, this means its own file, or its own package if it is complex or reused. + +#### Initialization + +Instrumentation setup should be explicit, side-effect free, and local to the relevant component. +Avoid relying on global or implicit [side effects][side-effect] for initialization. 
+ +Encapsulate setup in constructor functions, ensuring clear ownership and scope: + +```go +import ( + "errors" + + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" + "go.opentelemetry.io/otel/semconv/v1.37.0/otelconv" +) + +type SDKComponent struct { + inst *instrumentation +} + +func NewSDKComponent(config Config) (*SDKComponent, error) { + inst, err := newInstrumentation() + if err != nil { + return nil, err + } + return &SDKComponent{inst: inst}, nil +} + +type instrumentation struct { + inflight otelconv.SDKComponentInflight + exported otelconv.SDKComponentExported +} + +func newInstrumentation() (*instrumentation, error) { + if !x.Observability.Enabled() { + return nil, nil + } + + meter := otel.GetMeterProvider().Meter( + "", + metric.WithInstrumentationVersion(sdk.Version()), + metric.WithSchemaURL(semconv.SchemaURL), + ) + + inst := &instrumentation{} + + var err, e error + inst.inflight, e = otelconv.NewSDKComponentInflight(meter) + err = errors.Join(err, e) + + inst.exported, e = otelconv.NewSDKComponentExported(meter) + err = errors.Join(err, e) + + return inst, err +} +``` + +```go +// ❌ Avoid this pattern. +func (c *Component) initObservability() { + // Initialize observability metrics + if !x.Observability.Enabled() { + return + } + + // Initialize observability metrics + c.inst = &instrumentation{/* ... */} +} +``` + +[side-effect]: https://en.wikipedia.org/wiki/Side_effect_(computer_science) + +#### Performance + +When observability is disabled there should be little to no overhead. + +```go +func (e *Exporter) ExportSpans(ctx context.Context, spans []trace.ReadOnlySpan) error { + if e.inst != nil { + attrs := expensiveOperation() + e.inst.recordSpanInflight(ctx, int64(len(spans)), attrs...) + } + // Export spans... +} +``` + +```go +// ❌ Avoid this pattern. +func (e *Exporter) ExportSpans(ctx context.Context, spans []trace.ReadOnlySpan) error { + attrs := expensiveOperation() + e.inst.recordSpanInflight(ctx, int64(len(spans)), attrs...) 
+ // Export spans... +} + +func (i *instrumentation) recordSpanInflight(ctx context.Context, count int64, attrs ...attribute.KeyValue) { + if i == nil || i.inflight == nil { + return + } + i.inflight.Add(ctx, count, metric.WithAttributes(attrs...)) +} +``` + +When observability is enabled, the instrumentation code paths should be optimized to reduce allocation and computation overhead. + +##### Attribute and Option Allocation Management + +Pool attribute slices and options with [`sync.Pool`] to minimize allocations in measurement calls with dynamic attributes. + +```go +var ( + attrPool = sync.Pool{ + New: func() any { + // Pre-allocate common capacity + knownCap := 8 // Adjust based on expected usage + s := make([]attribute.KeyValue, 0, knownCap) + // Return a pointer to avoid extra allocation on Put(). + return &s + }, + } + + addOptPool = &sync.Pool{ + New: func() any { + const n = 1 // WithAttributeSet + o := make([]metric.AddOption, 0, n) + // Return a pointer to avoid extra allocation on Put(). + return &o + }, + } +) + +func (i *instrumentation) record(ctx context.Context, value int64, baseAttrs ...attribute.KeyValue) { + attrs := attrPool.Get().(*[]attribute.KeyValue) + defer func() { + *attrs = (*attrs)[:0] // Reset. + attrPool.Put(attrs) + }() + + *attrs = append(*attrs, baseAttrs...) + // Add any dynamic attributes. + *attrs = append(*attrs, semconv.OTelComponentName("exporter-1")) + + addOpt := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *addOpt = (*addOpt)[:0] + addOptPool.Put(addOpt) + }() + + set := attribute.NewSet(*attrs...) + *addOpt = append(*addOpt, metric.WithAttributeSet(set)) + + i.counter.Add(ctx, value, *addOpt...) +} +``` + +Pools are most effective when there are many pooled objects of the same sufficiently large size, and the objects are repeatedly used. +This amortizes the cost of allocation and synchronization. 
+Ideally, the pools should be scoped to be used as widely as possible within the component to maximize this efficiency while still ensuring correctness. + +[`sync.Pool`]: https://pkg.go.dev/sync#Pool + +##### Cache common attribute sets for repeated measurements + +If a static set of attributes are used for measurements and they are known at compile time, pre-compute and cache these attributes. + +```go +type spanLiveSetKey struct { + sampled bool +} + +var spanLiveSetCache = map[spanLiveSetKey]attribute.Set{ + {true}: attribute.NewSet( + otelconv.SDKSpanLive{}.AttrSpanSamplingResult( + otelconv.SpanSamplingResultRecordAndSample, + ), + ), + {false}: attribute.NewSet( + otelconv.SDKSpanLive{}.AttrSpanSamplingResult( + otelconv.SpanSamplingResultRecordOnly, + ), + ), +} + +func spanLiveSet(sampled bool) attribute.Set { + key := spanLiveSetKey{sampled: sampled} + return spanLiveSetCache[key] +} +``` + +##### Benchmarking + +Always provide benchmarks when introducing or refactoring instrumentation. +Demonstrate the impact (allocs/op, B/op, ns/op) in enabled/disabled scenarios: + +```go +func BenchmarkExportSpans(b *testing.B) { + scenarios := []struct { + name string + obsEnabled bool + }{ + {"ObsDisabled", false}, + {"ObsEnabled", true}, + } + + for _, scenario := range scenarios { + b.Run(scenario.name, func(b *testing.B) { + b.Setenv( + "OTEL_GO_X_OBSERVABILITY", + strconv.FormatBool(scenario.obsEnabled), + ) + + exporter := NewExporter() + spans := generateTestSpans(100) + + b.ResetTimer() + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + _ = exporter.ExportSpans(context.Background(), spans) + } + }) + } +} +``` + +#### Error Handling and Robustness + +Errors should be reported back to the caller if possible, and partial failures should be handled as gracefully as possible. 
+ +```go +func newInstrumentation() (*instrumentation, error) { + if !x.Observability.Enabled() { + return nil, nil + } + + m := otel.GetMeterProvider().Meter(/* initialize meter */) + counter, err := otelconv.NewSDKComponentCounter(m) + // Use the partially initialized counter if available. + i := &instrumentation{counter: counter} + // Return any error to the caller. + return i, err +} +``` + +```go +// ❌ Avoid this pattern. +func newInstrumentation() *instrumentation { + if !x.Observability.Enabled() { + return nil, nil + } + + m := otel.GetMeterProvider().Meter(/* initialize meter */) + counter, err := otelconv.NewSDKComponentCounter(m) + if err != nil { + // ❌ Do not dump the error to the OTel Handler. Return it to the + // caller. + otel.Handle(err) + // ❌ Do not return nil if we can still use the partially initialized + // counter. + return nil + } + return &instrumentation{counter: counter} +} +``` + +If the instrumented component cannot report the error to the user, let it report the error to `otel.Handle`. + +#### Context Propagation + +Ensure observability measurements receive the correct context, especially for trace exemplars and distributed context: + +```go +func (e *Exporter) ExportSpans(ctx context.Context, spans []trace.ReadOnlySpan) error { + // Use the provided context for observability measurements + e.inst.recordSpanExportStarted(ctx, len(spans)) + + err := e.doExport(ctx, spans) + + if err != nil { + e.inst.recordSpanExportFailed(ctx, len(spans), err) + } else { + e.inst.recordSpanExportSucceeded(ctx, len(spans)) + } + + return err +} +``` + +```go +// ❌ Avoid this pattern. +func (e *Exporter) ExportSpans(ctx context.Context, spans []trace.ReadOnlySpan) error { + // ❌ Do not break the context propagation. + e.inst.recordSpanExportStarted(context.Background(), len(spans)) + + err := e.doExport(ctx, spans) + + /* ... 
*/ + + return err +} +``` + +#### Semantic Conventions Compliance + +All observability metrics should follow the [OpenTelemetry Semantic Conventions for SDK metrics](https://github.com/open-telemetry/semantic-conventions/blob/1cf2476ae5e518225a766990a28a6d5602bd5a30/docs/otel/sdk-metrics.md). + +Use the metric semantic conventions convenience package [otelconv](./semconv/v1.37.0/otelconv/metric.go). + +##### Component Identification + +Component names and types should follow [semantic convention](https://github.com/open-telemetry/semantic-conventions/blob/1cf2476ae5e518225a766990a28a6d5602bd5a30/docs/registry/attributes/otel.md#otel-component-attributes). + +If a component is not a well-known type specified in the semantic conventions, use the package path scope type as a stable identifier. + +```go +componentType := "go.opentelemetry.io/otel/sdk/trace.Span" +``` + +```go +// ❌ Do not do this. +componentType := "trace-span" +``` + +The component name should be a stable unique identifier for the specific instance of the component. + +Use a global counter to ensure uniqueness if necessary. + +```go +// Unique 0-based ID counter for component instances. +var componentIDCounter atomic.Int64 + +// nextID returns the next unique ID for a component. +func nextID() int64 { + return componentIDCounter.Add(1) - 1 +} + +// componentName returns a unique name for the component instance. +func componentName() attribute.KeyValue { + id := nextID() + name := fmt.Sprintf("%s/%d", componentType, id) + return semconv.OTelComponentName(name) +} +``` + +The component ID will need to be resettable for deterministic testing. +If tests are in a different package than the component being tested (i.e. a `_test` package name), use a generated `counter` internal package to manage the counter. +See [stdouttrace exporter example](./exporters/stdout/stdouttrace/internal/gen.go) for reference. 
+ +#### Testing + +Use deterministic testing with isolated state: + +```go +func TestObservability(t *testing.T) { + // Restore state after test to ensure this does not affect other tests. + prev := otel.GetMeterProvider() + t.Cleanup(func() { otel.SetMeterProvider(prev) }) + + // Isolate the meter provider for deterministic testing + reader := metric.NewManualReader() + meterProvider := metric.NewMeterProvider(metric.WithReader(reader)) + otel.SetMeterProvider(meterProvider) + + // Use t.Setenv to ensure environment variable is restored after test. + t.Setenv("OTEL_GO_X_OBSERVABILITY", "true") + + // Reset component ID counter to ensure deterministic component names. + componentIDCounter.Store(0) + + /* ... test code ... */ +} +``` + +Test order should not affect results. +Ensure that any global state (e.g. component ID counters) is reset between tests. + ## Approvers and Maintainers ### Maintainers @@ -696,7 +1130,6 @@ For more information about the approver role, see the [community repository](htt ### Triagers - [Alex Kats](https://github.com/akats7), Capital One -- [Cheng-Zhen Yang](https://github.com/scorpionknifes), Independent For more information about the triager role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#triager). 
@@ -704,6 +1137,7 @@ For more information about the triager role, see the [community repository](http - [Aaron Clawson](https://github.com/MadVikingGod) - [Anthony Mirabella](https://github.com/Aneurysm9) +- [Cheng-Zhen Yang](https://github.com/scorpionknifes) - [Chester Cheung](https://github.com/hanyuancheung) - [Evan Torrie](https://github.com/evantorrie) - [Gustavo Silva Paiva](https://github.com/paivagustavo) diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile index bc0f1f92d1f..44870248c32 100644 --- a/vendor/go.opentelemetry.io/otel/Makefile +++ b/vendor/go.opentelemetry.io/otel/Makefile @@ -146,11 +146,12 @@ build-tests/%: # Tests -TEST_TARGETS := test-default test-bench test-short test-verbose test-race test-concurrent-safe +TEST_TARGETS := test-default test-bench test-short test-verbose test-race test-concurrent-safe test-fuzz .PHONY: $(TEST_TARGETS) test test-default test-race: ARGS=-race test-bench: ARGS=-run=xxxxxMatchNothingxxxxx -test.benchtime=1ms -bench=. test-short: ARGS=-short +test-fuzz: ARGS=-fuzztime=10s -fuzz test-verbose: ARGS=-v -race test-concurrent-safe: ARGS=-run=ConcurrentSafe -count=100 -race test-concurrent-safe: TIMEOUT=120 diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md index 6b7ab5f2193..c6335954311 100644 --- a/vendor/go.opentelemetry.io/otel/README.md +++ b/vendor/go.opentelemetry.io/otel/README.md @@ -55,25 +55,18 @@ Currently, this project supports the following environments. 
|----------|------------|--------------| | Ubuntu | 1.25 | amd64 | | Ubuntu | 1.24 | amd64 | -| Ubuntu | 1.23 | amd64 | | Ubuntu | 1.25 | 386 | | Ubuntu | 1.24 | 386 | -| Ubuntu | 1.23 | 386 | | Ubuntu | 1.25 | arm64 | | Ubuntu | 1.24 | arm64 | -| Ubuntu | 1.23 | arm64 | -| macOS 13 | 1.25 | amd64 | -| macOS 13 | 1.24 | amd64 | -| macOS 13 | 1.23 | amd64 | +| macOS | 1.25 | amd64 | +| macOS | 1.24 | amd64 | | macOS | 1.25 | arm64 | | macOS | 1.24 | arm64 | -| macOS | 1.23 | arm64 | | Windows | 1.25 | amd64 | | Windows | 1.24 | amd64 | -| Windows | 1.23 | amd64 | | Windows | 1.25 | 386 | | Windows | 1.24 | 386 | -| Windows | 1.23 | 386 | While this project should work for other systems, no compatibility guarantees are made for those systems currently. diff --git a/vendor/go.opentelemetry.io/otel/RELEASING.md b/vendor/go.opentelemetry.io/otel/RELEASING.md index 1ddcdef0396..861756fd745 100644 --- a/vendor/go.opentelemetry.io/otel/RELEASING.md +++ b/vendor/go.opentelemetry.io/otel/RELEASING.md @@ -24,7 +24,7 @@ Ensure things look correct before submitting a pull request to include the addit ## Breaking changes validation -You can run `make gorelease` that runs [gorelease](https://pkg.go.dev/golang.org/x/exp/cmd/gorelease) to ensure that there are no unwanted changes done in the public API. +You can run `make gorelease` which runs [gorelease](https://pkg.go.dev/golang.org/x/exp/cmd/gorelease) to ensure that there are no unwanted changes made in the public API. You can check/report problems with `gorelease` [here](https://golang.org/issues/26420). @@ -62,7 +62,7 @@ Update go.mod for submodules to depend on the new release which will happen in t ``` 3. Update the [Changelog](./CHANGELOG.md). - - Make sure all relevant changes for this release are included and are in language that non-contributors to the project can understand. + - Make sure all relevant changes for this release are included and are written in language that non-contributors to the project can understand. 
To verify this, you can look directly at the commits since the ``. ``` @@ -107,33 +107,49 @@ It is critical you make sure the version you push upstream is correct. ... ``` +## Sign artifacts + +To ensure we comply with CNCF best practices, we need to sign the release artifacts. + +Download the `.tar.gz` and `.zip` archives from the [tags page](https://github.com/open-telemetry/opentelemetry-go/tags) for the new release tag. +Both archives need to be signed with your GPG key. + +You can use [this script] to verify the contents of the archives before signing them. + +To find your GPG key ID, run: + +```terminal +gpg --list-secret-keys --keyid-format=long +``` + +The key ID is the 16-character string after `sec rsa4096/` (or similar). + +Set environment variables and sign both artifacts: + +```terminal +export VERSION="" # e.g., v1.32.0 +export KEY_ID="" + +gpg --local-user $KEY_ID --armor --detach-sign opentelemetry-go-$VERSION.tar.gz +gpg --local-user $KEY_ID --armor --detach-sign opentelemetry-go-$VERSION.zip +``` + +You can verify the signatures with: + +```terminal +gpg --verify opentelemetry-go-$VERSION.tar.gz.asc opentelemetry-go-$VERSION.tar.gz +gpg --verify opentelemetry-go-$VERSION.zip.asc opentelemetry-go-$VERSION.zip +``` + +[this script]: https://github.com/MrAlias/attest-sh + ## Release Finally create a Release for the new `` on GitHub. The release body should include all the release notes from the Changelog for this release. -### Sign the Release Artifact - -To ensure we comply with CNCF best practices, we need to sign the release artifact. -The tarball attached to the GitHub release needs to be signed with your GPG key. - -Follow [these steps] to sign the release artifact and upload it to GitHub. -You can use [this script] to verify the contents of the tarball before signing it. - -Be sure to use the correct GPG key when signing the release artifact. 
- -```terminal -gpg --local-user --armor --detach-sign opentelemetry-go-.tar.gz -``` - -You can verify the signature with: - -```terminal -gpg --verify opentelemetry-go-.tar.gz.asc opentelemetry-go-.tar.gz -``` - -[these steps]: https://wiki.debian.org/Creating%20signed%20GitHub%20releases -[this script]: https://github.com/MrAlias/attest-sh +***IMPORTANT***: GitHub Releases are immutable once created. +You must upload the signed artifacts (`.tar.gz`, `.tar.gz.asc`, `.zip`, and `.zip.asc`) when creating the release, as they cannot be added or modified later. ## Post-Release @@ -160,14 +176,6 @@ This helps track what changes were included in each release. Once all related issues and PRs have been added to the milestone, close the milestone. -### Demo Repository - -Bump the dependencies in the following Go services: - -- [`accounting`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/accounting) -- [`checkoutservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/checkout) -- [`productcatalogservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/product-catalog) - ### Close the `Version Release` issue Once the todo list in the `Version Release` issue is complete, close the issue. diff --git a/vendor/go.opentelemetry.io/otel/VERSIONING.md b/vendor/go.opentelemetry.io/otel/VERSIONING.md index b8cb605c166..b27c9e84f51 100644 --- a/vendor/go.opentelemetry.io/otel/VERSIONING.md +++ b/vendor/go.opentelemetry.io/otel/VERSIONING.md @@ -83,7 +83,7 @@ is designed so the following goals can be achieved. in either the module path or the import path. * In addition to public APIs, telemetry produced by stable instrumentation will remain stable and backwards compatible. This is to avoid breaking - alerts and dashboard. + alerts and dashboards. * Modules will be used to encapsulate instrumentation, detectors, exporters, propagators, and any other independent sets of related components. 
* Experimental modules still under active development will be versioned at diff --git a/vendor/go.opentelemetry.io/otel/attribute/encoder.go b/vendor/go.opentelemetry.io/otel/attribute/encoder.go index 6333d34b310..6cc1a1655cf 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/encoder.go +++ b/vendor/go.opentelemetry.io/otel/attribute/encoder.go @@ -16,7 +16,7 @@ type ( // set into a wire representation. Encoder interface { // Encode returns the serialized encoding of the attribute set using - // its Iterator. This result may be cached by a attribute.Set. + // its Iterator. This result may be cached by an attribute.Set. Encode(iterator Iterator) string // ID returns a value that is unique for each class of attribute diff --git a/vendor/go.opentelemetry.io/otel/attribute/hash.go b/vendor/go.opentelemetry.io/otel/attribute/hash.go new file mode 100644 index 00000000000..6aa69aeaecf --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/hash.go @@ -0,0 +1,92 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package attribute // import "go.opentelemetry.io/otel/attribute" + +import ( + "fmt" + "reflect" + + "go.opentelemetry.io/otel/attribute/internal/xxhash" +) + +// Type identifiers. These identifiers are hashed before the value of the +// corresponding type. This is done to distinguish values that are hashed with +// the same value representation (e.g. `int64(1)` and `true`, []int64{0} and +// int64(0)). +// +// These are all 8 byte length strings converted to a uint64 representation. A +// uint64 is used instead of the string directly as an optimization, it avoids +// the for loop in [xxhash] which adds minor overhead. 
+const ( + boolID uint64 = 7953749933313450591 // "_boolean" (little endian) + int64ID uint64 = 7592915492740740150 // "64_bit_i" (little endian) + float64ID uint64 = 7376742710626956342 // "64_bit_f" (little endian) + stringID uint64 = 6874584755375207263 // "_string_" (little endian) + boolSliceID uint64 = 6875993255270243167 // "_[]bool_" (little endian) + int64SliceID uint64 = 3762322556277578591 // "_[]int64" (little endian) + float64SliceID uint64 = 7308324551835016539 // "[]double" (little endian) + stringSliceID uint64 = 7453010373645655387 // "[]string" (little endian) +) + +// hashKVs returns a new xxHash64 hash of kvs. +func hashKVs(kvs []KeyValue) uint64 { + h := xxhash.New() + for _, kv := range kvs { + h = hashKV(h, kv) + } + return h.Sum64() +} + +// hashKV returns the xxHash64 hash of kv with h as the base. +func hashKV(h xxhash.Hash, kv KeyValue) xxhash.Hash { + h = h.String(string(kv.Key)) + + switch kv.Value.Type() { + case BOOL: + h = h.Uint64(boolID) + h = h.Uint64(kv.Value.numeric) + case INT64: + h = h.Uint64(int64ID) + h = h.Uint64(kv.Value.numeric) + case FLOAT64: + h = h.Uint64(float64ID) + // Assumes numeric stored with math.Float64bits. 
+ h = h.Uint64(kv.Value.numeric) + case STRING: + h = h.Uint64(stringID) + h = h.String(kv.Value.stringly) + case BOOLSLICE: + h = h.Uint64(boolSliceID) + rv := reflect.ValueOf(kv.Value.slice) + for i := 0; i < rv.Len(); i++ { + h = h.Bool(rv.Index(i).Bool()) + } + case INT64SLICE: + h = h.Uint64(int64SliceID) + rv := reflect.ValueOf(kv.Value.slice) + for i := 0; i < rv.Len(); i++ { + h = h.Int64(rv.Index(i).Int()) + } + case FLOAT64SLICE: + h = h.Uint64(float64SliceID) + rv := reflect.ValueOf(kv.Value.slice) + for i := 0; i < rv.Len(); i++ { + h = h.Float64(rv.Index(i).Float()) + } + case STRINGSLICE: + h = h.Uint64(stringSliceID) + rv := reflect.ValueOf(kv.Value.slice) + for i := 0; i < rv.Len(); i++ { + h = h.String(rv.Index(i).String()) + } + case INVALID: + default: + // Logging is an alternative, but using the internal logger here + // causes an import cycle so it is not done. + v := kv.Value.AsInterface() + msg := fmt.Sprintf("unknown value type: %[1]v (%[1]T)", v) + panic(msg) + } + return h +} diff --git a/vendor/go.opentelemetry.io/otel/attribute/internal/xxhash/xxhash.go b/vendor/go.opentelemetry.io/otel/attribute/internal/xxhash/xxhash.go new file mode 100644 index 00000000000..113a978383b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/internal/xxhash/xxhash.go @@ -0,0 +1,64 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package xxhash provides a wrapper around the xxhash library for attribute hashing. +package xxhash // import "go.opentelemetry.io/otel/attribute/internal/xxhash" + +import ( + "encoding/binary" + "math" + + "github.com/cespare/xxhash/v2" +) + +// Hash wraps xxhash.Digest to provide an API friendly for hashing attribute values. +type Hash struct { + d *xxhash.Digest +} + +// New returns a new initialized xxHash64 hasher. 
+func New() Hash { + return Hash{d: xxhash.New()} +} + +func (h Hash) Uint64(val uint64) Hash { + var buf [8]byte + binary.LittleEndian.PutUint64(buf[:], val) + // errors from Write are always nil for xxhash + // if it returns an err then panic + _, err := h.d.Write(buf[:]) + if err != nil { + panic("xxhash write of uint64 failed: " + err.Error()) + } + return h +} + +func (h Hash) Bool(val bool) Hash { // nolint:revive // This is a hashing function. + if val { + return h.Uint64(1) + } + return h.Uint64(0) +} + +func (h Hash) Float64(val float64) Hash { + return h.Uint64(math.Float64bits(val)) +} + +func (h Hash) Int64(val int64) Hash { + return h.Uint64(uint64(val)) // nolint:gosec // Overflow doesn't matter since we are hashing. +} + +func (h Hash) String(val string) Hash { + // errors from WriteString are always nil for xxhash + // if it returns an err then panic + _, err := h.d.WriteString(val) + if err != nil { + panic("xxhash write of string failed: " + err.Error()) + } + return h +} + +// Sum64 returns the current hash value. +func (h Hash) Sum64() uint64 { + return h.d.Sum64() +} diff --git a/vendor/go.opentelemetry.io/otel/attribute/set.go b/vendor/go.opentelemetry.io/otel/attribute/set.go index 64735d382ea..911d557ee54 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/set.go +++ b/vendor/go.opentelemetry.io/otel/attribute/set.go @@ -9,6 +9,8 @@ import ( "reflect" "slices" "sort" + + "go.opentelemetry.io/otel/attribute/internal/xxhash" ) type ( @@ -23,19 +25,19 @@ type ( // the Equals method to ensure stable equivalence checking. // // Users should also use the Distinct returned from Equivalent as a map key - // instead of a Set directly. In addition to that type providing guarantees - // on stable equivalence, it may also provide performance improvements. + // instead of a Set directly. Set has relatively poor performance when used + // as a map key compared to Distinct. 
Set struct { - equivalent Distinct + hash uint64 + data any } - // Distinct is a unique identifier of a Set. + // Distinct is an identifier of a Set which is very likely to be unique. // - // Distinct is designed to ensure equivalence stability: comparisons will - // return the same value across versions. For this reason, Distinct should - // always be used as a map key instead of a Set. + // Distinct should be used as a map key instead of a Set for to provide better + // performance for map operations. Distinct struct { - iface any + hash uint64 } // Sortable implements sort.Interface, used for sorting KeyValue. @@ -46,15 +48,34 @@ type ( Sortable []KeyValue ) +// Compile time check these types remain comparable. +var ( + _ = isComparable(Set{}) + _ = isComparable(Distinct{}) +) + +func isComparable[T comparable](t T) T { return t } + var ( // keyValueType is used in computeDistinctReflect. keyValueType = reflect.TypeOf(KeyValue{}) - // emptySet is returned for empty attribute sets. - emptySet = &Set{ - equivalent: Distinct{ - iface: [0]KeyValue{}, - }, + // emptyHash is the hash of an empty set. + emptyHash = xxhash.New().Sum64() + + // userDefinedEmptySet is an empty set. It was mistakenly exposed to users + // as something they can assign to, so it must remain addressable and + // mutable. + // + // This is kept for backwards compatibility, but should not be used in new code. + userDefinedEmptySet = &Set{ + hash: emptyHash, + data: [0]KeyValue{}, + } + + emptySet = Set{ + hash: emptyHash, + data: [0]KeyValue{}, } ) @@ -62,33 +83,35 @@ var ( // // This is a convenience provided for optimized calling utility. func EmptySet() *Set { - return emptySet -} - -// reflectValue abbreviates reflect.ValueOf(d). -func (d Distinct) reflectValue() reflect.Value { - return reflect.ValueOf(d.iface) + // Continue to return the pointer to the user-defined empty set for + // backwards-compatibility. + // + // New code should not use this, instead use emptySet. 
+ return userDefinedEmptySet } // Valid reports whether this value refers to a valid Set. -func (d Distinct) Valid() bool { - return d.iface != nil +func (d Distinct) Valid() bool { return d.hash != 0 } + +// reflectValue abbreviates reflect.ValueOf(d). +func (l Set) reflectValue() reflect.Value { + return reflect.ValueOf(l.data) } // Len returns the number of attributes in this set. func (l *Set) Len() int { - if l == nil || !l.equivalent.Valid() { + if l == nil || l.hash == 0 { return 0 } - return l.equivalent.reflectValue().Len() + return l.reflectValue().Len() } // Get returns the KeyValue at ordered position idx in this set. func (l *Set) Get(idx int) (KeyValue, bool) { - if l == nil || !l.equivalent.Valid() { + if l == nil || l.hash == 0 { return KeyValue{}, false } - value := l.equivalent.reflectValue() + value := l.reflectValue() if idx >= 0 && idx < value.Len() { // Note: The Go compiler successfully avoids an allocation for @@ -101,10 +124,10 @@ func (l *Set) Get(idx int) (KeyValue, bool) { // Value returns the value of a specified key in this set. func (l *Set) Value(k Key) (Value, bool) { - if l == nil || !l.equivalent.Valid() { + if l == nil || l.hash == 0 { return Value{}, false } - rValue := l.equivalent.reflectValue() + rValue := l.reflectValue() vlen := rValue.Len() idx := sort.Search(vlen, func(idx int) bool { @@ -144,20 +167,29 @@ func (l *Set) ToSlice() []KeyValue { return iter.ToSlice() } -// Equivalent returns a value that may be used as a map key. The Distinct type -// guarantees that the result will equal the equivalent. Distinct value of any +// Equivalent returns a value that may be used as a map key. Equal Distinct +// values are very likely to be equivalent attribute Sets. Distinct value of any // attribute set with the same elements as this, where sets are made unique by // choosing the last value in the input for any given key. 
func (l *Set) Equivalent() Distinct { - if l == nil || !l.equivalent.Valid() { - return emptySet.equivalent + if l == nil || l.hash == 0 { + return Distinct{hash: emptySet.hash} } - return l.equivalent + return Distinct{hash: l.hash} } // Equals reports whether the argument set is equivalent to this set. func (l *Set) Equals(o *Set) bool { - return l.Equivalent() == o.Equivalent() + if l.Equivalent() != o.Equivalent() { + return false + } + if l == nil || l.hash == 0 { + l = &emptySet + } + if o == nil || o.hash == 0 { + o = &emptySet + } + return l.data == o.data } // Encoded returns the encoded form of this set, according to encoder. @@ -169,12 +201,6 @@ func (l *Set) Encoded(encoder Encoder) string { return encoder.Encode(l.Iter()) } -func empty() Set { - return Set{ - equivalent: emptySet.equivalent, - } -} - // NewSet returns a new Set. See the documentation for // NewSetWithSortableFiltered for more details. // @@ -204,7 +230,7 @@ func NewSetWithSortable(kvs []KeyValue, _ *Sortable) Set { func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) { // Check for empty set. if len(kvs) == 0 { - return empty(), nil + return emptySet, nil } // Stable sort so the following de-duplication can implement @@ -233,10 +259,10 @@ func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) { if filter != nil { if div := filteredToFront(kvs, filter); div != 0 { - return Set{equivalent: computeDistinct(kvs[div:])}, kvs[:div] + return newSet(kvs[div:]), kvs[:div] } } - return Set{equivalent: computeDistinct(kvs)}, nil + return newSet(kvs), nil } // NewSetWithSortableFiltered returns a new Set. @@ -316,7 +342,7 @@ func (l *Set) Filter(re Filter) (Set, []KeyValue) { if first == 0 { // It is safe to assume len(slice) >= 1 given we found at least one // attribute above that needs to be filtered out. 
- return Set{equivalent: computeDistinct(slice[1:])}, slice[:1] + return newSet(slice[1:]), slice[:1] } // Move the filtered slice[first] to the front (preserving order). @@ -326,25 +352,24 @@ func (l *Set) Filter(re Filter) (Set, []KeyValue) { // Do not re-evaluate re(slice[first+1:]). div := filteredToFront(slice[1:first+1], re) + 1 - return Set{equivalent: computeDistinct(slice[div:])}, slice[:div] + return newSet(slice[div:]), slice[:div] } -// computeDistinct returns a Distinct using either the fixed- or -// reflect-oriented code path, depending on the size of the input. The input -// slice is assumed to already be sorted and de-duplicated. -func computeDistinct(kvs []KeyValue) Distinct { - iface := computeDistinctFixed(kvs) - if iface == nil { - iface = computeDistinctReflect(kvs) +// newSet returns a new set based on the sorted and uniqued kvs. +func newSet(kvs []KeyValue) Set { + s := Set{ + hash: hashKVs(kvs), + data: computeDataFixed(kvs), } - return Distinct{ - iface: iface, + if s.data == nil { + s.data = computeDataReflect(kvs) } + return s } -// computeDistinctFixed computes a Distinct for small slices. It returns nil -// if the input is too large for this code path. -func computeDistinctFixed(kvs []KeyValue) any { +// computeDataFixed computes a Set data for small slices. It returns nil if the +// input is too large for this code path. +func computeDataFixed(kvs []KeyValue) any { switch len(kvs) { case 1: return [1]KeyValue(kvs) @@ -371,9 +396,9 @@ func computeDistinctFixed(kvs []KeyValue) any { } } -// computeDistinctReflect computes a Distinct using reflection, works for any -// size input. -func computeDistinctReflect(kvs []KeyValue) any { +// computeDataReflect computes a Set data using reflection, works for any size +// input. 
+func computeDataReflect(kvs []KeyValue) any { at := reflect.New(reflect.ArrayOf(len(kvs), keyValueType)).Elem() for i, keyValue := range kvs { *(at.Index(i).Addr().Interface().(*KeyValue)) = keyValue @@ -383,7 +408,7 @@ func computeDistinctReflect(kvs []KeyValue) any { // MarshalJSON returns the JSON encoding of the Set. func (l *Set) MarshalJSON() ([]byte, error) { - return json.Marshal(l.equivalent.iface) + return json.Marshal(l.data) } // MarshalLog is the marshaling function used by the logging system to represent this Set. diff --git a/vendor/go.opentelemetry.io/otel/attribute/type_string.go b/vendor/go.opentelemetry.io/otel/attribute/type_string.go index e584b24776b..24f1fa37dbe 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/type_string.go +++ b/vendor/go.opentelemetry.io/otel/attribute/type_string.go @@ -24,8 +24,9 @@ const _Type_name = "INVALIDBOOLINT64FLOAT64STRINGBOOLSLICEINT64SLICEFLOAT64SLICE var _Type_index = [...]uint8{0, 7, 11, 16, 23, 29, 38, 48, 60, 71} func (i Type) String() string { - if i < 0 || i >= Type(len(_Type_index)-1) { + idx := int(i) - 0 + if i < 0 || idx >= len(_Type_index)-1 { return "Type(" + strconv.FormatInt(int64(i), 10) + ")" } - return _Type_name[_Type_index[i]:_Type_index[i+1]] + return _Type_name[_Type_index[idx]:_Type_index[idx+1]] } diff --git a/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/baggage/baggage.go index f83a448ec61..78e98c4c0f3 100644 --- a/vendor/go.opentelemetry.io/otel/baggage/baggage.go +++ b/vendor/go.opentelemetry.io/otel/baggage/baggage.go @@ -648,7 +648,7 @@ func parsePropertyInternal(s string) (p Property, ok bool) { // If we couldn't find any valid key character, // it means the key is either empty or invalid. if keyStart == keyEnd { - return + return p, ok } // Skip spaces after the key: " key< >= value ". @@ -658,13 +658,13 @@ func parsePropertyInternal(s string) (p Property, ok bool) { // A key can have no value, like: " key ". 
ok = true p.key = s[keyStart:keyEnd] - return + return p, ok } // If we have not reached the end and we can't find the '=' delimiter, // it means the property is invalid. if s[index] != keyValueDelimiter[0] { - return + return p, ok } // Attempting to parse the value. @@ -690,14 +690,14 @@ func parsePropertyInternal(s string) (p Property, ok bool) { // we have not reached the end, it means the property is // invalid, something like: " key = value value1". if index != len(s) { - return + return p, ok } // Decode a percent-encoded value. rawVal := s[valueStart:valueEnd] unescapeVal, err := url.PathUnescape(rawVal) if err != nil { - return + return p, ok } value := replaceInvalidUTF8Sequences(len(rawVal), unescapeVal) @@ -706,7 +706,7 @@ func parsePropertyInternal(s string) (p Property, ok bool) { p.hasValue = true p.value = value - return + return p, ok } func skipSpace(s string, offset int) int { diff --git a/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile b/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile index a311fbb4835..cadb87cc0ee 100644 --- a/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile +++ b/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile @@ -1,4 +1,4 @@ # This is a renovate-friendly source of Docker images. 
FROM python:3.13.6-slim-bullseye@sha256:e98b521460ee75bca92175c16247bdf7275637a8faaeb2bcfa19d879ae5c4b9a AS python -FROM otel/weaver:v0.17.1@sha256:32523b5e44fb44418786347e9f7dde187d8797adb6d57a2ee99c245346c3cdfe AS weaver +FROM otel/weaver:v0.19.0@sha256:3d20814cef548f1d31f27f054fb4cd6a05125641a9f7cc29fc7eb234e8052cd9 AS weaver FROM avtodev/markdown-lint:v1@sha256:6aeedc2f49138ce7a1cd0adffc1b1c0321b841dc2102408967d9301c031949ee AS markdown diff --git a/vendor/go.opentelemetry.io/otel/internal/global/meter.go b/vendor/go.opentelemetry.io/otel/internal/global/meter.go index adb37b5b0e7..6db969f73c7 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/meter.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/meter.go @@ -105,7 +105,7 @@ type delegatedInstrument interface { setDelegate(metric.Meter) } -// instID are the identifying properties of a instrument. +// instID are the identifying properties of an instrument. type instID struct { // name is the name of the stream. name string diff --git a/vendor/go.opentelemetry.io/otel/metric.go b/vendor/go.opentelemetry.io/otel/metric.go index 1e6473b32f3..527d9aec86b 100644 --- a/vendor/go.opentelemetry.io/otel/metric.go +++ b/vendor/go.opentelemetry.io/otel/metric.go @@ -11,7 +11,7 @@ import ( // Meter returns a Meter from the global MeterProvider. The name must be the // name of the library providing instrumentation. This name may be the same as // the instrumented code only if that code provides built-in instrumentation. -// If the name is empty, then a implementation defined default name will be +// If the name is empty, then an implementation defined default name will be // used instead. 
// // If this is called before a global MeterProvider is registered the returned diff --git a/vendor/go.opentelemetry.io/otel/metric/config.go b/vendor/go.opentelemetry.io/otel/metric/config.go index d9e3b13e4d1..e42dd6e70ab 100644 --- a/vendor/go.opentelemetry.io/otel/metric/config.go +++ b/vendor/go.opentelemetry.io/otel/metric/config.go @@ -3,7 +3,11 @@ package metric // import "go.opentelemetry.io/otel/metric" -import "go.opentelemetry.io/otel/attribute" +import ( + "slices" + + "go.opentelemetry.io/otel/attribute" +) // MeterConfig contains options for Meters. type MeterConfig struct { @@ -62,12 +66,38 @@ func WithInstrumentationVersion(version string) MeterOption { }) } -// WithInstrumentationAttributes sets the instrumentation attributes. +// WithInstrumentationAttributes adds the instrumentation attributes. // -// The passed attributes will be de-duplicated. +// This is equivalent to calling [WithInstrumentationAttributeSet] with an +// [attribute.Set] created from a clone of the passed attributes. +// [WithInstrumentationAttributeSet] is recommended for more control. +// +// If multiple [WithInstrumentationAttributes] or [WithInstrumentationAttributeSet] +// options are passed, the attributes will be merged together in the order +// they are passed. Attributes with duplicate keys will use the last value passed. func WithInstrumentationAttributes(attr ...attribute.KeyValue) MeterOption { + set := attribute.NewSet(slices.Clone(attr)...) + return WithInstrumentationAttributeSet(set) +} + +// WithInstrumentationAttributeSet adds the instrumentation attributes. +// +// If multiple [WithInstrumentationAttributes] or [WithInstrumentationAttributeSet] +// options are passed, the attributes will be merged together in the order +// they are passed. Attributes with duplicate keys will use the last value passed. 
+func WithInstrumentationAttributeSet(set attribute.Set) MeterOption { + if set.Len() == 0 { + return meterOptionFunc(func(config MeterConfig) MeterConfig { + return config + }) + } + return meterOptionFunc(func(config MeterConfig) MeterConfig { - config.attrs = attribute.NewSet(attr...) + if config.attrs.Len() == 0 { + config.attrs = set + } else { + config.attrs = mergeSets(config.attrs, set) + } return config }) } diff --git a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go index 6692d2665d2..271ab71f1ae 100644 --- a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go +++ b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go @@ -111,7 +111,7 @@ func (TraceContext) extract(carrier TextMapCarrier) trace.SpanContext { } // Clear all flags other than the trace-context supported sampling bit. - scc.TraceFlags = trace.TraceFlags(opts[0]) & trace.FlagsSampled + scc.TraceFlags = trace.TraceFlags(opts[0]) & trace.FlagsSampled // nolint:gosec // slice size already checked. // Ignore the error returned here. Failure to parse tracestate MUST NOT // affect the parsing of traceparent according to the W3C tracecontext diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/x/features.go b/vendor/go.opentelemetry.io/otel/sdk/internal/x/features.go new file mode 100644 index 00000000000..bfeb73e811b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/internal/x/features.go @@ -0,0 +1,39 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package x documents experimental features for [go.opentelemetry.io/otel/sdk]. +package x // import "go.opentelemetry.io/otel/sdk/internal/x" + +import "strings" + +// Resource is an experimental feature flag that defines if resource detectors +// should be included experimental semantic conventions. 
+// +// To enable this feature set the OTEL_GO_X_RESOURCE environment variable +// to the case-insensitive string value of "true" (i.e. "True" and "TRUE" +// will also enable this). +var Resource = newFeature( + []string{"RESOURCE"}, + func(v string) (string, bool) { + if strings.EqualFold(v, "true") { + return v, true + } + return "", false + }, +) + +// Observability is an experimental feature flag that determines if SDK +// observability metrics are enabled. +// +// To enable this feature set the OTEL_GO_X_OBSERVABILITY environment variable +// to the case-insensitive string value of "true" (i.e. "True" and "TRUE" +// will also enable this). +var Observability = newFeature( + []string{"OBSERVABILITY", "SELF_OBSERVABILITY"}, + func(v string) (string, bool) { + if strings.EqualFold(v, "true") { + return v, true + } + return "", false + }, +) diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go b/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go index 1be472e917a..13347e56052 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go +++ b/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go @@ -1,48 +1,38 @@ +// Code generated by gotmpl. DO NOT MODIFY. +// source: internal/shared/x/x.go.tmpl + // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -// Package x contains support for OTel SDK experimental features. -// -// This package should only be used for features defined in the specification. -// It should not be used for experiments or new project ideas. +// Package x documents experimental features for [go.opentelemetry.io/otel/sdk]. package x // import "go.opentelemetry.io/otel/sdk/internal/x" import ( "os" - "strings" ) -// Resource is an experimental feature flag that defines if resource detectors -// should be included experimental semantic conventions. -// -// To enable this feature set the OTEL_GO_X_RESOURCE environment variable -// to the case-insensitive string value of "true" (i.e. 
"True" and "TRUE" -// will also enable this). -var Resource = newFeature("RESOURCE", func(v string) (string, bool) { - if strings.EqualFold(v, "true") { - return v, true - } - return "", false -}) - // Feature is an experimental feature control flag. It provides a uniform way // to interact with these feature flags and parse their values. type Feature[T any] struct { - key string + keys []string parse func(v string) (T, bool) } -func newFeature[T any](suffix string, parse func(string) (T, bool)) Feature[T] { +func newFeature[T any](suffix []string, parse func(string) (T, bool)) Feature[T] { const envKeyRoot = "OTEL_GO_X_" + keys := make([]string, 0, len(suffix)) + for _, s := range suffix { + keys = append(keys, envKeyRoot+s) + } return Feature[T]{ - key: envKeyRoot + suffix, + keys: keys, parse: parse, } } -// Key returns the environment variable key that needs to be set to enable the +// Keys returns the environment variable keys that can be set to enable the // feature. -func (f Feature[T]) Key() string { return f.key } +func (f Feature[T]) Keys() []string { return f.keys } // Lookup returns the user configured value for the feature and true if the // user has enabled the feature. Otherwise, if the feature is not enabled, a @@ -52,11 +42,13 @@ func (f Feature[T]) Lookup() (v T, ok bool) { // // > The SDK MUST interpret an empty value of an environment variable the // > same way as when the variable is unset. - vRaw := os.Getenv(f.key) - if vRaw == "" { - return v, ok + for _, key := range f.keys { + vRaw := os.Getenv(key) + if vRaw != "" { + return f.parse(vRaw) + } } - return f.parse(vRaw) + return v, ok } // Enabled reports whether the feature is enabled. 
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go index cc8b8938ed5..4c1c30f2560 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go @@ -2,7 +2,6 @@ // SPDX-License-Identifier: Apache-2.0 //go:build dragonfly || freebsd || netbsd || openbsd || solaris -// +build dragonfly freebsd netbsd openbsd solaris package resource // import "go.opentelemetry.io/otel/sdk/resource" diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go index f84f173240f..4a26096c8d0 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go @@ -2,7 +2,6 @@ // SPDX-License-Identifier: Apache-2.0 //go:build linux -// +build linux package resource // import "go.opentelemetry.io/otel/sdk/resource" diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go index df12c44c564..63ad2fa4e05 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go @@ -2,7 +2,6 @@ // SPDX-License-Identifier: Apache-2.0 //go:build !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows -// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows package resource // import "go.opentelemetry.io/otel/sdk/resource" diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go index 3677c83d7da..2b8ca20b381 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go @@ -2,7 +2,6 @@ // 
SPDX-License-Identifier: Apache-2.0 //go:build windows -// +build windows package resource // import "go.opentelemetry.io/otel/sdk/resource" diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go index 7252af79fc9..a1763267c22 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go @@ -2,7 +2,6 @@ // SPDX-License-Identifier: Apache-2.0 //go:build aix || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix dragonfly freebsd linux netbsd openbsd solaris zos package resource // import "go.opentelemetry.io/otel/sdk/resource" diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go index a6ff26a4d27..6c50ab68677 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go @@ -2,7 +2,6 @@ // SPDX-License-Identifier: Apache-2.0 //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package resource // import "go.opentelemetry.io/otel/sdk/resource" diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go index a77742b0771..25f629532a3 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go @@ -2,7 +2,6 @@ // SPDX-License-Identifier: Apache-2.0 //go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows && !zos -// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows,!zos package resource // import "go.opentelemetry.io/otel/sdk/resource" diff --git 
a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go index 9bc3e525d19..7d15cbb9c0f 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go @@ -6,20 +6,14 @@ package trace // import "go.opentelemetry.io/otel/sdk/trace" import ( "context" "errors" - "fmt" "sync" "sync/atomic" "time" "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/internal/global" - "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/sdk" - "go.opentelemetry.io/otel/sdk/internal/env" - "go.opentelemetry.io/otel/sdk/trace/internal/x" - semconv "go.opentelemetry.io/otel/semconv/v1.37.0" - "go.opentelemetry.io/otel/semconv/v1.37.0/otelconv" + "go.opentelemetry.io/otel/sdk/trace/internal/env" + "go.opentelemetry.io/otel/sdk/trace/internal/observ" "go.opentelemetry.io/otel/trace" ) @@ -33,8 +27,6 @@ const ( DefaultMaxExportBatchSize = 512 ) -var queueFull = otelconv.ErrorTypeAttr("queue_full") - // BatchSpanProcessorOption configures a BatchSpanProcessor. 
type BatchSpanProcessorOption func(o *BatchSpanProcessorOptions) @@ -78,10 +70,7 @@ type batchSpanProcessor struct { queue chan ReadOnlySpan dropped uint32 - selfObservabilityEnabled bool - callbackRegistration metric.Registration - spansProcessedCounter otelconv.SDKProcessorSpanProcessed - componentNameAttr attribute.KeyValue + inst *observ.BSP batch []ReadOnlySpan batchMutex sync.Mutex @@ -124,19 +113,14 @@ func NewBatchSpanProcessor(exporter SpanExporter, options ...BatchSpanProcessorO stopCh: make(chan struct{}), } - if x.SelfObservability.Enabled() { - bsp.selfObservabilityEnabled = true - bsp.componentNameAttr = componentName() - - var err error - bsp.spansProcessedCounter, bsp.callbackRegistration, err = newBSPObs( - bsp.componentNameAttr, - func() int64 { return int64(len(bsp.queue)) }, - int64(bsp.o.MaxQueueSize), - ) - if err != nil { - otel.Handle(err) - } + var err error + bsp.inst, err = observ.NewBSP( + nextProcessorID(), + func() int64 { return int64(len(bsp.queue)) }, + int64(bsp.o.MaxQueueSize), + ) + if err != nil { + otel.Handle(err) } bsp.stopWait.Add(1) @@ -157,51 +141,6 @@ func nextProcessorID() int64 { return processorIDCounter.Add(1) - 1 } -func componentName() attribute.KeyValue { - id := nextProcessorID() - name := fmt.Sprintf("%s/%d", otelconv.ComponentTypeBatchingSpanProcessor, id) - return semconv.OTelComponentName(name) -} - -// newBSPObs creates and returns a new set of metrics instruments and a -// registration for a BatchSpanProcessor. It is the caller's responsibility -// to unregister the registration when it is no longer needed. 
-func newBSPObs( - cmpnt attribute.KeyValue, - qLen func() int64, - qMax int64, -) (otelconv.SDKProcessorSpanProcessed, metric.Registration, error) { - meter := otel.GetMeterProvider().Meter( - selfObsScopeName, - metric.WithInstrumentationVersion(sdk.Version()), - metric.WithSchemaURL(semconv.SchemaURL), - ) - - qCap, err := otelconv.NewSDKProcessorSpanQueueCapacity(meter) - - qSize, e := otelconv.NewSDKProcessorSpanQueueSize(meter) - err = errors.Join(err, e) - - spansProcessed, e := otelconv.NewSDKProcessorSpanProcessed(meter) - err = errors.Join(err, e) - - cmpntT := semconv.OTelComponentTypeBatchingSpanProcessor - attrs := metric.WithAttributes(cmpnt, cmpntT) - - reg, e := meter.RegisterCallback( - func(_ context.Context, o metric.Observer) error { - o.ObserveInt64(qSize.Inst(), qLen(), attrs) - o.ObserveInt64(qCap.Inst(), qMax, attrs) - return nil - }, - qSize.Inst(), - qCap.Inst(), - ) - err = errors.Join(err, e) - - return spansProcessed, reg, err -} - // OnStart method does nothing. 
func (*batchSpanProcessor) OnStart(context.Context, ReadWriteSpan) {} @@ -242,8 +181,8 @@ func (bsp *batchSpanProcessor) Shutdown(ctx context.Context) error { case <-ctx.Done(): err = ctx.Err() } - if bsp.selfObservabilityEnabled { - err = errors.Join(err, bsp.callbackRegistration.Unregister()) + if bsp.inst != nil { + err = errors.Join(err, bsp.inst.Shutdown()) } }) return err @@ -357,10 +296,8 @@ func (bsp *batchSpanProcessor) exportSpans(ctx context.Context) error { if l := len(bsp.batch); l > 0 { global.Debug("exporting spans", "count", len(bsp.batch), "total_dropped", atomic.LoadUint32(&bsp.dropped)) - if bsp.selfObservabilityEnabled { - bsp.spansProcessedCounter.Add(ctx, int64(l), - bsp.componentNameAttr, - bsp.spansProcessedCounter.AttrComponentType(otelconv.ComponentTypeBatchingSpanProcessor)) + if bsp.inst != nil { + bsp.inst.Processed(ctx, int64(l)) } err := bsp.e.ExportSpans(ctx, bsp.batch) @@ -470,11 +407,8 @@ func (bsp *batchSpanProcessor) enqueueBlockOnQueueFull(ctx context.Context, sd R case bsp.queue <- sd: return true case <-ctx.Done(): - if bsp.selfObservabilityEnabled { - bsp.spansProcessedCounter.Add(ctx, 1, - bsp.componentNameAttr, - bsp.spansProcessedCounter.AttrComponentType(otelconv.ComponentTypeBatchingSpanProcessor), - bsp.spansProcessedCounter.AttrErrorType(queueFull)) + if bsp.inst != nil { + bsp.inst.ProcessedQueueFull(ctx, 1) } return false } @@ -490,11 +424,8 @@ func (bsp *batchSpanProcessor) enqueueDrop(ctx context.Context, sd ReadOnlySpan) return true default: atomic.AddUint32(&bsp.dropped, 1) - if bsp.selfObservabilityEnabled { - bsp.spansProcessedCounter.Add(ctx, 1, - bsp.componentNameAttr, - bsp.spansProcessedCounter.AttrComponentType(otelconv.ComponentTypeBatchingSpanProcessor), - bsp.spansProcessedCounter.AttrErrorType(queueFull)) + if bsp.inst != nil { + bsp.inst.ProcessedQueueFull(ctx, 1) } } return false diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go b/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go index 
e58e7f6ed78..b502c7d4798 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go @@ -7,7 +7,7 @@ Package trace contains support for OpenTelemetry distributed tracing. The following assumes a basic familiarity with OpenTelemetry concepts. See https://opentelemetry.io. -See [go.opentelemetry.io/otel/sdk/trace/internal/x] for information about +See [go.opentelemetry.io/otel/sdk/internal/x] for information about the experimental features. */ package trace // import "go.opentelemetry.io/otel/sdk/trace" diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/env/env.go similarity index 98% rename from vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go rename to vendor/go.opentelemetry.io/otel/sdk/trace/internal/env/env.go index e3309231d42..58f68df4417 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/env/env.go @@ -3,7 +3,7 @@ // Package env provides types and functionality for environment variable support // in the OpenTelemetry SDK. 
-package env // import "go.opentelemetry.io/otel/sdk/internal/env" +package env // import "go.opentelemetry.io/otel/sdk/trace/internal/env" import ( "os" diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/batch_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/batch_span_processor.go new file mode 100644 index 00000000000..bd7fe236296 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/batch_span_processor.go @@ -0,0 +1,119 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package observ // import "go.opentelemetry.io/otel/sdk/trace/internal/observ" + +import ( + "context" + "errors" + "fmt" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/sdk" + "go.opentelemetry.io/otel/sdk/internal/x" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" + "go.opentelemetry.io/otel/semconv/v1.37.0/otelconv" +) + +const ( + // ScopeName is the name of the instrumentation scope. + ScopeName = "go.opentelemetry.io/otel/sdk/trace/internal/observ" + + // SchemaURL is the schema URL of the instrumentation. + SchemaURL = semconv.SchemaURL +) + +// ErrQueueFull is the attribute value for the "queue_full" error type. +var ErrQueueFull = otelconv.SDKProcessorSpanProcessed{}.AttrErrorType( + otelconv.ErrorTypeAttr("queue_full"), +) + +// BSPComponentName returns the component name attribute for a +// BatchSpanProcessor with the given ID. +func BSPComponentName(id int64) attribute.KeyValue { + t := otelconv.ComponentTypeBatchingSpanProcessor + name := fmt.Sprintf("%s/%d", t, id) + return semconv.OTelComponentName(name) +} + +// BSP is the instrumentation for an OTel SDK BatchSpanProcessor. 
+type BSP struct { + reg metric.Registration + + processed metric.Int64Counter + processedOpts []metric.AddOption + processedQueueFullOpts []metric.AddOption +} + +func NewBSP(id int64, qLen func() int64, qMax int64) (*BSP, error) { + if !x.Observability.Enabled() { + return nil, nil + } + + meter := otel.GetMeterProvider().Meter( + ScopeName, + metric.WithInstrumentationVersion(sdk.Version()), + metric.WithSchemaURL(SchemaURL), + ) + + qCap, err := otelconv.NewSDKProcessorSpanQueueCapacity(meter) + if err != nil { + err = fmt.Errorf("failed to create BSP queue capacity metric: %w", err) + } + qCapInst := qCap.Inst() + + qSize, e := otelconv.NewSDKProcessorSpanQueueSize(meter) + if e != nil { + e := fmt.Errorf("failed to create BSP queue size metric: %w", e) + err = errors.Join(err, e) + } + qSizeInst := qSize.Inst() + + cmpntT := semconv.OTelComponentTypeBatchingSpanProcessor + cmpnt := BSPComponentName(id) + set := attribute.NewSet(cmpnt, cmpntT) + + obsOpts := []metric.ObserveOption{metric.WithAttributeSet(set)} + reg, e := meter.RegisterCallback( + func(_ context.Context, o metric.Observer) error { + o.ObserveInt64(qSizeInst, qLen(), obsOpts...) + o.ObserveInt64(qCapInst, qMax, obsOpts...) 
+ return nil + }, + qSizeInst, + qCapInst, + ) + if e != nil { + e := fmt.Errorf("failed to register BSP queue size/capacity callback: %w", e) + err = errors.Join(err, e) + } + + processed, e := otelconv.NewSDKProcessorSpanProcessed(meter) + if e != nil { + e := fmt.Errorf("failed to create BSP processed spans metric: %w", e) + err = errors.Join(err, e) + } + processedOpts := []metric.AddOption{metric.WithAttributeSet(set)} + + set = attribute.NewSet(cmpnt, cmpntT, ErrQueueFull) + processedQueueFullOpts := []metric.AddOption{metric.WithAttributeSet(set)} + + return &BSP{ + reg: reg, + processed: processed.Inst(), + processedOpts: processedOpts, + processedQueueFullOpts: processedQueueFullOpts, + }, err +} + +func (b *BSP) Shutdown() error { return b.reg.Unregister() } + +func (b *BSP) Processed(ctx context.Context, n int64) { + b.processed.Add(ctx, n, b.processedOpts...) +} + +func (b *BSP) ProcessedQueueFull(ctx context.Context, n int64) { + b.processed.Add(ctx, n, b.processedQueueFullOpts...) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/doc.go b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/doc.go new file mode 100644 index 00000000000..b542121e6a2 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/doc.go @@ -0,0 +1,6 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package observ provides observability instrumentation for the OTel trace SDK +// package. 
+package observ // import "go.opentelemetry.io/otel/sdk/trace/internal/observ" diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/simple_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/simple_span_processor.go new file mode 100644 index 00000000000..7d33870613a --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/simple_span_processor.go @@ -0,0 +1,97 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package observ // import "go.opentelemetry.io/otel/sdk/trace/internal/observ" + +import ( + "context" + "fmt" + "sync" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/sdk" + "go.opentelemetry.io/otel/sdk/internal/x" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" + "go.opentelemetry.io/otel/semconv/v1.37.0/otelconv" +) + +var measureAttrsPool = sync.Pool{ + New: func() any { + // "component.name" + "component.type" + "error.type" + const n = 1 + 1 + 1 + s := make([]attribute.KeyValue, 0, n) + // Return a pointer to a slice instead of a slice itself + // to avoid allocations on every call. + return &s + }, +} + +// SSP is the instrumentation for an OTel SDK SimpleSpanProcessor. +type SSP struct { + spansProcessedCounter metric.Int64Counter + addOpts []metric.AddOption + attrs []attribute.KeyValue +} + +// SSPComponentName returns the component name attribute for a +// SimpleSpanProcessor with the given ID. +func SSPComponentName(id int64) attribute.KeyValue { + t := otelconv.ComponentTypeSimpleSpanProcessor + name := fmt.Sprintf("%s/%d", t, id) + return semconv.OTelComponentName(name) +} + +// NewSSP returns instrumentation for an OTel SDK SimpleSpanProcessor with the +// provided ID. +// +// If the experimental observability is disabled, nil is returned. 
+func NewSSP(id int64) (*SSP, error) { + if !x.Observability.Enabled() { + return nil, nil + } + + meter := otel.GetMeterProvider().Meter( + ScopeName, + metric.WithInstrumentationVersion(sdk.Version()), + metric.WithSchemaURL(SchemaURL), + ) + spansProcessedCounter, err := otelconv.NewSDKProcessorSpanProcessed(meter) + if err != nil { + err = fmt.Errorf("failed to create SSP processed spans metric: %w", err) + } + + componentName := SSPComponentName(id) + componentType := spansProcessedCounter.AttrComponentType(otelconv.ComponentTypeSimpleSpanProcessor) + attrs := []attribute.KeyValue{componentName, componentType} + addOpts := []metric.AddOption{metric.WithAttributeSet(attribute.NewSet(attrs...))} + + return &SSP{ + spansProcessedCounter: spansProcessedCounter.Inst(), + addOpts: addOpts, + attrs: attrs, + }, err +} + +// SpanProcessed records that a span has been processed by the SimpleSpanProcessor. +// If err is non-nil, it records the processing error as an attribute. +func (ssp *SSP) SpanProcessed(ctx context.Context, err error) { + ssp.spansProcessedCounter.Add(ctx, 1, ssp.addOption(err)...) +} + +func (ssp *SSP) addOption(err error) []metric.AddOption { + if err == nil { + return ssp.addOpts + } + attrs := measureAttrsPool.Get().(*[]attribute.KeyValue) + defer func() { + *attrs = (*attrs)[:0] // reset the slice for reuse + measureAttrsPool.Put(attrs) + }() + *attrs = append(*attrs, ssp.attrs...) + *attrs = append(*attrs, semconv.ErrorType(err)) + // Do not inefficiently make a copy of attrs by using + // WithAttributes instead of WithAttributeSet. 
+ return []metric.AddOption{metric.WithAttributeSet(attribute.NewSet(*attrs...))} +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/tracer.go b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/tracer.go new file mode 100644 index 00000000000..a8a16458981 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/tracer.go @@ -0,0 +1,223 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package observ // import "go.opentelemetry.io/otel/sdk/trace/internal/observ" + +import ( + "context" + "errors" + "fmt" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/sdk" + "go.opentelemetry.io/otel/sdk/internal/x" + "go.opentelemetry.io/otel/semconv/v1.37.0/otelconv" + "go.opentelemetry.io/otel/trace" +) + +var meterOpts = []metric.MeterOption{ + metric.WithInstrumentationVersion(sdk.Version()), + metric.WithSchemaURL(SchemaURL), +} + +// Tracer is instrumentation for an OTel SDK Tracer. +type Tracer struct { + enabled bool + + live metric.Int64UpDownCounter + started metric.Int64Counter +} + +func NewTracer() (Tracer, error) { + if !x.Observability.Enabled() { + return Tracer{}, nil + } + meter := otel.GetMeterProvider().Meter(ScopeName, meterOpts...) 
+ + var err error + l, e := otelconv.NewSDKSpanLive(meter) + if e != nil { + e = fmt.Errorf("failed to create span live metric: %w", e) + err = errors.Join(err, e) + } + + s, e := otelconv.NewSDKSpanStarted(meter) + if e != nil { + e = fmt.Errorf("failed to create span started metric: %w", e) + err = errors.Join(err, e) + } + + return Tracer{enabled: true, live: l.Inst(), started: s.Inst()}, err +} + +func (t Tracer) Enabled() bool { return t.enabled } + +func (t Tracer) SpanStarted(ctx context.Context, psc trace.SpanContext, span trace.Span) { + key := spanStartedKey{ + parent: parentStateNoParent, + sampling: samplingStateDrop, + } + + if psc.IsValid() { + if psc.IsRemote() { + key.parent = parentStateRemoteParent + } else { + key.parent = parentStateLocalParent + } + } + + if span.IsRecording() { + if span.SpanContext().IsSampled() { + key.sampling = samplingStateRecordAndSample + } else { + key.sampling = samplingStateRecordOnly + } + } + + opts := spanStartedOpts[key] + t.started.Add(ctx, 1, opts...) +} + +func (t Tracer) SpanLive(ctx context.Context, span trace.Span) { + t.spanLive(ctx, 1, span) +} + +func (t Tracer) SpanEnded(ctx context.Context, span trace.Span) { + t.spanLive(ctx, -1, span) +} + +func (t Tracer) spanLive(ctx context.Context, value int64, span trace.Span) { + key := spanLiveKey{sampled: span.SpanContext().IsSampled()} + opts := spanLiveOpts[key] + t.live.Add(ctx, value, opts...) 
+} + +type parentState int + +const ( + parentStateNoParent parentState = iota + parentStateLocalParent + parentStateRemoteParent +) + +type samplingState int + +const ( + samplingStateDrop samplingState = iota + samplingStateRecordOnly + samplingStateRecordAndSample +) + +type spanStartedKey struct { + parent parentState + sampling samplingState +} + +var spanStartedOpts = map[spanStartedKey][]metric.AddOption{ + { + parentStateNoParent, + samplingStateDrop, + }: { + metric.WithAttributeSet(attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginNone), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultDrop), + )), + }, + { + parentStateLocalParent, + samplingStateDrop, + }: { + metric.WithAttributeSet(attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginLocal), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultDrop), + )), + }, + { + parentStateRemoteParent, + samplingStateDrop, + }: { + metric.WithAttributeSet(attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginRemote), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultDrop), + )), + }, + + { + parentStateNoParent, + samplingStateRecordOnly, + }: { + metric.WithAttributeSet(attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginNone), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordOnly), + )), + }, + { + parentStateLocalParent, + samplingStateRecordOnly, + }: { + metric.WithAttributeSet(attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginLocal), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordOnly), + )), + }, + { + parentStateRemoteParent, + samplingStateRecordOnly, + }: { + metric.WithAttributeSet(attribute.NewSet( + 
otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginRemote), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordOnly), + )), + }, + + { + parentStateNoParent, + samplingStateRecordAndSample, + }: { + metric.WithAttributeSet(attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginNone), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordAndSample), + )), + }, + { + parentStateLocalParent, + samplingStateRecordAndSample, + }: { + metric.WithAttributeSet(attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginLocal), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordAndSample), + )), + }, + { + parentStateRemoteParent, + samplingStateRecordAndSample, + }: { + metric.WithAttributeSet(attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginRemote), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordAndSample), + )), + }, +} + +type spanLiveKey struct { + sampled bool +} + +var spanLiveOpts = map[spanLiveKey][]metric.AddOption{ + {true}: { + metric.WithAttributeSet(attribute.NewSet( + otelconv.SDKSpanLive{}.AttrSpanSamplingResult( + otelconv.SpanSamplingResultRecordAndSample, + ), + )), + }, + {false}: { + metric.WithAttributeSet(attribute.NewSet( + otelconv.SDKSpanLive{}.AttrSpanSamplingResult( + otelconv.SpanSamplingResultRecordOnly, + ), + )), + }, +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/internal/x/README.md b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/x/README.md deleted file mode 100644 index feec16fa64b..00000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/internal/x/README.md +++ /dev/null @@ -1,35 +0,0 @@ -# Experimental Features - -The Trace SDK contains features that have not yet stabilized in the OpenTelemetry specification. 
-These features are added to the OpenTelemetry Go Trace SDK prior to stabilization in the specification so that users can start experimenting with them and provide feedback. - -These features may change in backwards incompatible ways as feedback is applied. -See the [Compatibility and Stability](#compatibility-and-stability) section for more information. - -## Features - -- [Self-Observability](#self-observability) - -### Self-Observability - -The SDK provides a self-observability feature that allows you to monitor the SDK itself. - -To opt-in, set the environment variable `OTEL_GO_X_SELF_OBSERVABILITY` to `true`. - -When enabled, the SDK will create the following metrics using the global `MeterProvider`: - -- `otel.sdk.span.live` -- `otel.sdk.span.started` - -Please see the [Semantic conventions for OpenTelemetry SDK metrics] documentation for more details on these metrics. - -[Semantic conventions for OpenTelemetry SDK metrics]: https://github.com/open-telemetry/semantic-conventions/blob/v1.36.0/docs/otel/sdk-metrics.md - -## Compatibility and Stability - -Experimental features do not fall within the scope of the OpenTelemetry Go versioning and stability [policy](../../../../VERSIONING.md). -These features may be removed or modified in successive version releases, including patch versions. - -When an experimental feature is promoted to a stable feature, a migration path will be included in the changelog entry of the release. -There is no guarantee that any environment variable feature flags that enabled the experimental feature will be supported by the stable version. -If they are supported, they may be accompanied with a deprecation notice stating a timeline for the removal of that support. 
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/internal/x/x.go b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/x/x.go deleted file mode 100644 index 2fcbbcc66ec..00000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/internal/x/x.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Package x documents experimental features for [go.opentelemetry.io/otel/sdk/trace]. -package x // import "go.opentelemetry.io/otel/sdk/trace/internal/x" - -import ( - "os" - "strings" -) - -// SelfObservability is an experimental feature flag that determines if SDK -// self-observability metrics are enabled. -// -// To enable this feature set the OTEL_GO_X_SELF_OBSERVABILITY environment variable -// to the case-insensitive string value of "true" (i.e. "True" and "TRUE" -// will also enable this). -var SelfObservability = newFeature("SELF_OBSERVABILITY", func(v string) (string, bool) { - if strings.EqualFold(v, "true") { - return v, true - } - return "", false -}) - -// Feature is an experimental feature control flag. It provides a uniform way -// to interact with these feature flags and parse their values. -type Feature[T any] struct { - key string - parse func(v string) (T, bool) -} - -func newFeature[T any](suffix string, parse func(string) (T, bool)) Feature[T] { - const envKeyRoot = "OTEL_GO_X_" - return Feature[T]{ - key: envKeyRoot + suffix, - parse: parse, - } -} - -// Key returns the environment variable key that needs to be set to enable the -// feature. -func (f Feature[T]) Key() string { return f.key } - -// Lookup returns the user configured value for the feature and true if the -// user has enabled the feature. Otherwise, if the feature is not enabled, a -// zero-value and false are returned. 
-func (f Feature[T]) Lookup() (v T, ok bool) { - // https://github.com/open-telemetry/opentelemetry-specification/blob/62effed618589a0bec416a87e559c0a9d96289bb/specification/configuration/sdk-environment-variables.md#parsing-empty-value - // - // > The SDK MUST interpret an empty value of an environment variable the - // > same way as when the variable is unset. - vRaw := os.Getenv(f.key) - if vRaw == "" { - return v, ok - } - return f.parse(vRaw) -} - -// Enabled reports whether the feature is enabled. -func (f Feature[T]) Enabled() bool { - _, ok := f.Lookup() - return ok -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go index 37ce2ac876a..d2cf4ebd3e7 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go @@ -5,29 +5,21 @@ package trace // import "go.opentelemetry.io/otel/sdk/trace" import ( "context" - "errors" "fmt" "sync" "sync/atomic" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/internal/global" - "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/sdk" "go.opentelemetry.io/otel/sdk/instrumentation" "go.opentelemetry.io/otel/sdk/resource" - "go.opentelemetry.io/otel/sdk/trace/internal/x" - semconv "go.opentelemetry.io/otel/semconv/v1.37.0" - "go.opentelemetry.io/otel/semconv/v1.37.0/otelconv" + "go.opentelemetry.io/otel/sdk/trace/internal/observ" "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace/embedded" "go.opentelemetry.io/otel/trace/noop" ) -const ( - defaultTracerName = "go.opentelemetry.io/otel/sdk/tracer" - selfObsScopeName = "go.opentelemetry.io/otel/sdk/trace" -) +const defaultTracerName = "go.opentelemetry.io/otel/sdk/tracer" // tracerProviderConfig. 
type tracerProviderConfig struct { @@ -163,19 +155,16 @@ func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T t, ok := p.namedTracer[is] if !ok { t = &tracer{ - provider: p, - instrumentationScope: is, - selfObservabilityEnabled: x.SelfObservability.Enabled(), + provider: p, + instrumentationScope: is, } - if t.selfObservabilityEnabled { - var err error - t.spanLiveMetric, t.spanStartedMetric, err = newInst() - if err != nil { - msg := "failed to create self-observability metrics for tracer: %w" - err := fmt.Errorf(msg, err) - otel.Handle(err) - } + + var err error + t.inst, err = observ.NewTracer() + if err != nil { + otel.Handle(err) } + p.namedTracer[is] = t } return t, ok @@ -201,23 +190,6 @@ func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T return t } -func newInst() (otelconv.SDKSpanLive, otelconv.SDKSpanStarted, error) { - m := otel.GetMeterProvider().Meter( - selfObsScopeName, - metric.WithInstrumentationVersion(sdk.Version()), - metric.WithSchemaURL(semconv.SchemaURL), - ) - - var err error - spanLiveMetric, e := otelconv.NewSDKSpanLive(m) - err = errors.Join(err, e) - - spanStartedMetric, e := otelconv.NewSDKSpanStarted(m) - err = errors.Join(err, e) - - return spanLiveMetric, spanStartedMetric, err -} - // RegisterSpanProcessor adds the given SpanProcessor to the list of SpanProcessors. func (p *TracerProvider) RegisterSpanProcessor(sp SpanProcessor) { // This check prevents calls during a shutdown. 
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go index 411d9ccdd78..771e427a4c5 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go @@ -6,9 +6,12 @@ package trace // import "go.opentelemetry.io/otel/sdk/trace" import ( "context" "sync" + "sync/atomic" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/internal/global" + "go.opentelemetry.io/otel/sdk/trace/internal/observ" + "go.opentelemetry.io/otel/trace" ) // simpleSpanProcessor is a SpanProcessor that synchronously sends all @@ -17,6 +20,8 @@ type simpleSpanProcessor struct { exporterMu sync.Mutex exporter SpanExporter stopOnce sync.Once + + inst *observ.SSP } var _ SpanProcessor = (*simpleSpanProcessor)(nil) @@ -33,11 +38,26 @@ func NewSimpleSpanProcessor(exporter SpanExporter) SpanProcessor { ssp := &simpleSpanProcessor{ exporter: exporter, } + + var err error + ssp.inst, err = observ.NewSSP(nextSimpleProcessorID()) + if err != nil { + otel.Handle(err) + } + global.Warn("SimpleSpanProcessor is not recommended for production use, consider using BatchSpanProcessor instead.") return ssp } +var simpleProcessorIDCounter atomic.Int64 + +// nextSimpleProcessorID returns an identifier for this simple span processor, +// starting with 0 and incrementing by 1 each time it is called. +func nextSimpleProcessorID() int64 { + return simpleProcessorIDCounter.Add(1) - 1 +} + // OnStart does nothing. 
func (*simpleSpanProcessor) OnStart(context.Context, ReadWriteSpan) {} @@ -46,11 +66,20 @@ func (ssp *simpleSpanProcessor) OnEnd(s ReadOnlySpan) { ssp.exporterMu.Lock() defer ssp.exporterMu.Unlock() + var err error if ssp.exporter != nil && s.SpanContext().TraceFlags().IsSampled() { - if err := ssp.exporter.ExportSpans(context.Background(), []ReadOnlySpan{s}); err != nil { + err = ssp.exporter.ExportSpans(context.Background(), []ReadOnlySpan{s}) + if err != nil { otel.Handle(err) } } + + if ssp.inst != nil { + // Add the span to the context to ensure the metric is recorded + // with the correct span context. + ctx := trace.ContextWithSpanContext(context.Background(), s.SpanContext()) + ssp.inst.SpanProcessed(ctx, err) + } } // Shutdown shuts down the exporter this SimpleSpanProcessor exports to. diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go index b376051fbb8..8cfd9f62e3f 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go @@ -151,6 +151,12 @@ type recordingSpan struct { // tracer is the SDK tracer that created this span. tracer *tracer + + // origCtx is the context used when starting this span that has the + // recordingSpan instance set as the active span. If not nil, it is used + // when ending the span to ensure any metrics are recorded with a context + // containing this span without requiring an additional allocation. + origCtx context.Context } var ( @@ -158,6 +164,10 @@ var ( _ runtimeTracer = (*recordingSpan)(nil) ) +func (s *recordingSpan) setOrigCtx(ctx context.Context) { + s.origCtx = ctx +} + // SpanContext returns the SpanContext of this span. 
func (s *recordingSpan) SpanContext() trace.SpanContext { if s == nil { @@ -496,14 +506,15 @@ func (s *recordingSpan) End(options ...trace.SpanEndOption) { } s.mu.Unlock() - if s.tracer.selfObservabilityEnabled { - defer func() { - // Add the span to the context to ensure the metric is recorded - // with the correct span context. - ctx := trace.ContextWithSpan(context.Background(), s) - set := spanLiveSet(s.spanContext.IsSampled()) - s.tracer.spanLiveMetric.AddSet(ctx, -1, set) - }() + if s.tracer.inst.Enabled() { + ctx := s.origCtx + if ctx == nil { + // This should not happen as the origCtx should be set, but + // ensure trace information is propagated in the case of an + // error. + ctx = trace.ContextWithSpan(context.Background(), s) + } + defer s.tracer.inst.SpanEnded(ctx, s) } sps := s.tracer.provider.getSpanProcessors() diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go index bec5e209787..321d9743058 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go @@ -3,7 +3,7 @@ package trace // import "go.opentelemetry.io/otel/sdk/trace" -import "go.opentelemetry.io/otel/sdk/internal/env" +import "go.opentelemetry.io/otel/sdk/trace/internal/env" const ( // DefaultAttributeValueLengthLimit is the default maximum allowed diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go b/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go index e965c4cce86..e1d08fd4d8d 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go @@ -7,9 +7,8 @@ import ( "context" "time" - "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/sdk/instrumentation" - "go.opentelemetry.io/otel/semconv/v1.37.0/otelconv" + "go.opentelemetry.io/otel/sdk/trace/internal/observ" "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace/embedded" ) @@ -20,9 +19,7 @@ 
type tracer struct { provider *TracerProvider instrumentationScope instrumentation.Scope - selfObservabilityEnabled bool - spanLiveMetric otelconv.SDKSpanLive - spanStartedMetric otelconv.SDKSpanStarted + inst observ.Tracer } var _ trace.Tracer = &tracer{} @@ -53,10 +50,17 @@ func (tr *tracer) Start( s := tr.newSpan(ctx, name, &config) newCtx := trace.ContextWithSpan(ctx, s) - if tr.selfObservabilityEnabled { + if tr.inst.Enabled() { + if o, ok := s.(interface{ setOrigCtx(context.Context) }); ok { + // If this is a recording span, store the original context. + // This allows later retrieval of baggage and other information + // that may have been stored in the context at span start time and + // to avoid the allocation of repeatedly calling + // trace.ContextWithSpan. + o.setOrigCtx(newCtx) + } psc := trace.SpanContextFromContext(ctx) - set := spanStartedSet(psc, s) - tr.spanStartedMetric.AddSet(newCtx, 1, set) + tr.inst.SpanStarted(newCtx, psc, s) } if rw, ok := s.(ReadWriteSpan); ok && s.IsRecording() { @@ -168,12 +172,11 @@ func (tr *tracer) newRecordingSpan( s.SetAttributes(sr.Attributes...) s.SetAttributes(config.Attributes()...) - if tr.selfObservabilityEnabled { + if tr.inst.Enabled() { // Propagate any existing values from the context with the new span to // the measurement context. 
ctx = trace.ContextWithSpan(ctx, s) - set := spanLiveSet(s.spanContext.IsSampled()) - tr.spanLiveMetric.AddSet(ctx, 1, set) + tr.inst.SpanLive(ctx, s) } return s @@ -183,112 +186,3 @@ func (tr *tracer) newRecordingSpan( func (tr *tracer) newNonRecordingSpan(sc trace.SpanContext) nonRecordingSpan { return nonRecordingSpan{tracer: tr, sc: sc} } - -type parentState int - -const ( - parentStateNoParent parentState = iota - parentStateLocalParent - parentStateRemoteParent -) - -type samplingState int - -const ( - samplingStateDrop samplingState = iota - samplingStateRecordOnly - samplingStateRecordAndSample -) - -type spanStartedSetKey struct { - parent parentState - sampling samplingState -} - -var spanStartedSetCache = map[spanStartedSetKey]attribute.Set{ - {parentStateNoParent, samplingStateDrop}: attribute.NewSet( - otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginNone), - otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultDrop), - ), - {parentStateLocalParent, samplingStateDrop}: attribute.NewSet( - otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginLocal), - otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultDrop), - ), - {parentStateRemoteParent, samplingStateDrop}: attribute.NewSet( - otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginRemote), - otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultDrop), - ), - - {parentStateNoParent, samplingStateRecordOnly}: attribute.NewSet( - otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginNone), - otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordOnly), - ), - {parentStateLocalParent, samplingStateRecordOnly}: attribute.NewSet( - otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginLocal), - otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordOnly), - ), - {parentStateRemoteParent, 
samplingStateRecordOnly}: attribute.NewSet( - otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginRemote), - otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordOnly), - ), - - {parentStateNoParent, samplingStateRecordAndSample}: attribute.NewSet( - otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginNone), - otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordAndSample), - ), - {parentStateLocalParent, samplingStateRecordAndSample}: attribute.NewSet( - otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginLocal), - otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordAndSample), - ), - {parentStateRemoteParent, samplingStateRecordAndSample}: attribute.NewSet( - otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginRemote), - otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordAndSample), - ), -} - -func spanStartedSet(psc trace.SpanContext, span trace.Span) attribute.Set { - key := spanStartedSetKey{ - parent: parentStateNoParent, - sampling: samplingStateDrop, - } - - if psc.IsValid() { - if psc.IsRemote() { - key.parent = parentStateRemoteParent - } else { - key.parent = parentStateLocalParent - } - } - - if span.IsRecording() { - if span.SpanContext().IsSampled() { - key.sampling = samplingStateRecordAndSample - } else { - key.sampling = samplingStateRecordOnly - } - } - - return spanStartedSetCache[key] -} - -type spanLiveSetKey struct { - sampled bool -} - -var spanLiveSetCache = map[spanLiveSetKey]attribute.Set{ - {true}: attribute.NewSet( - otelconv.SDKSpanLive{}.AttrSpanSamplingResult( - otelconv.SpanSamplingResultRecordAndSample, - ), - ), - {false}: attribute.NewSet( - otelconv.SDKSpanLive{}.AttrSpanSamplingResult( - otelconv.SpanSamplingResultRecordOnly, - ), - ), -} - -func spanLiveSet(sampled bool) attribute.Set { - key := spanLiveSetKey{sampled: sampled} - 
return spanLiveSetCache[key] -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/version.go b/vendor/go.opentelemetry.io/otel/sdk/version.go index 7f97cc31e51..0a3b3661910 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/version.go +++ b/vendor/go.opentelemetry.io/otel/sdk/version.go @@ -6,5 +6,5 @@ package sdk // import "go.opentelemetry.io/otel/sdk" // Version is the current release version of the OpenTelemetry SDK in use. func Version() string { - return "1.38.0" + return "1.39.0" } diff --git a/vendor/go.opentelemetry.io/otel/semconv/internal/http.go b/vendor/go.opentelemetry.io/otel/semconv/internal/http.go index 58b5eddef66..f18d6e3f227 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/internal/http.go +++ b/vendor/go.opentelemetry.io/otel/semconv/internal/http.go @@ -121,7 +121,7 @@ func hostIPNamePort(hostWithPort string) (ip, name string, port int) { if parsedPort, err = strconv.ParseUint(portPart, 10, 16); err == nil { port = int(parsedPort) // nolint: gosec // Bit size of 16 checked above. 
} - return + return ip, name, port } // EndUserAttributesFromHTTPRequest generates attributes of the diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md deleted file mode 100644 index 2de1fc3c6be..00000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Semconv v1.26.0 - -[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.26.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.26.0) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go deleted file mode 100644 index d8dc822b263..00000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go +++ /dev/null @@ -1,8996 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated from semantic convention specification. DO NOT EDIT. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" - -import "go.opentelemetry.io/otel/attribute" - -// The Android platform on which the Android application is running. -const ( - // AndroidOSAPILevelKey is the attribute Key conforming to the - // "android.os.api_level" semantic conventions. It represents the uniquely - // identifies the framework API revision offered by a version - // (`os.version`) of the android operating system. More information can be - // found - // [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '33', '32' - AndroidOSAPILevelKey = attribute.Key("android.os.api_level") -) - -// AndroidOSAPILevel returns an attribute KeyValue conforming to the -// "android.os.api_level" semantic conventions. 
It represents the uniquely -// identifies the framework API revision offered by a version (`os.version`) of -// the android operating system. More information can be found -// [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels). -func AndroidOSAPILevel(val string) attribute.KeyValue { - return AndroidOSAPILevelKey.String(val) -} - -// ASP.NET Core attributes -const ( - // AspnetcoreRateLimitingResultKey is the attribute Key conforming to the - // "aspnetcore.rate_limiting.result" semantic conventions. It represents - // the rate-limiting result, shows whether the lease was acquired or - // contains a rejection reason - // - // Type: Enum - // RequirementLevel: Required - // Stability: stable - // Examples: 'acquired', 'request_canceled' - AspnetcoreRateLimitingResultKey = attribute.Key("aspnetcore.rate_limiting.result") - - // AspnetcoreDiagnosticsHandlerTypeKey is the attribute Key conforming to - // the "aspnetcore.diagnostics.handler.type" semantic conventions. It - // represents the full type name of the - // [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler) - // implementation that handled the exception. - // - // Type: string - // RequirementLevel: ConditionallyRequired (if and only if the exception - // was handled by this handler.) - // Stability: stable - // Examples: 'Contoso.MyHandler' - AspnetcoreDiagnosticsHandlerTypeKey = attribute.Key("aspnetcore.diagnostics.handler.type") - - // AspnetcoreDiagnosticsExceptionResultKey is the attribute Key conforming - // to the "aspnetcore.diagnostics.exception.result" semantic conventions. 
- // It represents the aSP.NET Core exception middleware handling result - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'handled', 'unhandled' - AspnetcoreDiagnosticsExceptionResultKey = attribute.Key("aspnetcore.diagnostics.exception.result") - - // AspnetcoreRateLimitingPolicyKey is the attribute Key conforming to the - // "aspnetcore.rate_limiting.policy" semantic conventions. It represents - // the rate limiting policy name. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'fixed', 'sliding', 'token' - AspnetcoreRateLimitingPolicyKey = attribute.Key("aspnetcore.rate_limiting.policy") - - // AspnetcoreRequestIsUnhandledKey is the attribute Key conforming to the - // "aspnetcore.request.is_unhandled" semantic conventions. It represents - // the flag indicating if request was handled by the application pipeline. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: stable - // Examples: True - AspnetcoreRequestIsUnhandledKey = attribute.Key("aspnetcore.request.is_unhandled") - - // AspnetcoreRoutingIsFallbackKey is the attribute Key conforming to the - // "aspnetcore.routing.is_fallback" semantic conventions. It represents a - // value that indicates whether the matched route is a fallback route. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: stable - // Examples: True - AspnetcoreRoutingIsFallbackKey = attribute.Key("aspnetcore.routing.is_fallback") - - // AspnetcoreRoutingMatchStatusKey is the attribute Key conforming to the - // "aspnetcore.routing.match_status" semantic conventions. 
It represents - // the match result - success or failure - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'success', 'failure' - AspnetcoreRoutingMatchStatusKey = attribute.Key("aspnetcore.routing.match_status") -) - -var ( - // Lease was acquired - AspnetcoreRateLimitingResultAcquired = AspnetcoreRateLimitingResultKey.String("acquired") - // Lease request was rejected by the endpoint limiter - AspnetcoreRateLimitingResultEndpointLimiter = AspnetcoreRateLimitingResultKey.String("endpoint_limiter") - // Lease request was rejected by the global limiter - AspnetcoreRateLimitingResultGlobalLimiter = AspnetcoreRateLimitingResultKey.String("global_limiter") - // Lease request was canceled - AspnetcoreRateLimitingResultRequestCanceled = AspnetcoreRateLimitingResultKey.String("request_canceled") -) - -var ( - // Exception was handled by the exception handling middleware - AspnetcoreDiagnosticsExceptionResultHandled = AspnetcoreDiagnosticsExceptionResultKey.String("handled") - // Exception was not handled by the exception handling middleware - AspnetcoreDiagnosticsExceptionResultUnhandled = AspnetcoreDiagnosticsExceptionResultKey.String("unhandled") - // Exception handling was skipped because the response had started - AspnetcoreDiagnosticsExceptionResultSkipped = AspnetcoreDiagnosticsExceptionResultKey.String("skipped") - // Exception handling didn't run because the request was aborted - AspnetcoreDiagnosticsExceptionResultAborted = AspnetcoreDiagnosticsExceptionResultKey.String("aborted") -) - -var ( - // Match succeeded - AspnetcoreRoutingMatchStatusSuccess = AspnetcoreRoutingMatchStatusKey.String("success") - // Match failed - AspnetcoreRoutingMatchStatusFailure = AspnetcoreRoutingMatchStatusKey.String("failure") -) - -// AspnetcoreDiagnosticsHandlerType returns an attribute KeyValue conforming -// to the "aspnetcore.diagnostics.handler.type" semantic conventions. 
It -// represents the full type name of the -// [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler) -// implementation that handled the exception. -func AspnetcoreDiagnosticsHandlerType(val string) attribute.KeyValue { - return AspnetcoreDiagnosticsHandlerTypeKey.String(val) -} - -// AspnetcoreRateLimitingPolicy returns an attribute KeyValue conforming to -// the "aspnetcore.rate_limiting.policy" semantic conventions. It represents -// the rate limiting policy name. -func AspnetcoreRateLimitingPolicy(val string) attribute.KeyValue { - return AspnetcoreRateLimitingPolicyKey.String(val) -} - -// AspnetcoreRequestIsUnhandled returns an attribute KeyValue conforming to -// the "aspnetcore.request.is_unhandled" semantic conventions. It represents -// the flag indicating if request was handled by the application pipeline. -func AspnetcoreRequestIsUnhandled(val bool) attribute.KeyValue { - return AspnetcoreRequestIsUnhandledKey.Bool(val) -} - -// AspnetcoreRoutingIsFallback returns an attribute KeyValue conforming to -// the "aspnetcore.routing.is_fallback" semantic conventions. It represents a -// value that indicates whether the matched route is a fallback route. -func AspnetcoreRoutingIsFallback(val bool) attribute.KeyValue { - return AspnetcoreRoutingIsFallbackKey.Bool(val) -} - -// Generic attributes for AWS services. -const ( - // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id" - // semantic conventions. It represents the AWS request ID as returned in - // the response headers `x-amz-request-id` or `x-amz-requestid`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ' - AWSRequestIDKey = attribute.Key("aws.request_id") -) - -// AWSRequestID returns an attribute KeyValue conforming to the -// "aws.request_id" semantic conventions. 
It represents the AWS request ID as -// returned in the response headers `x-amz-request-id` or `x-amz-requestid`. -func AWSRequestID(val string) attribute.KeyValue { - return AWSRequestIDKey.String(val) -} - -// Attributes for AWS DynamoDB. -const ( - // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to - // the "aws.dynamodb.attribute_definitions" semantic conventions. It - // represents the JSON-serialized value of each item in the - // `AttributeDefinitions` request field. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' - AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") - - // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the - // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the - // value of the `AttributesToGet` request parameter. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'lives', 'id' - AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") - - // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the - // "aws.dynamodb.consistent_read" semantic conventions. It represents the - // value of the `ConsistentRead` request parameter. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") - - // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the - // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the - // JSON-serialized value of each item in the `ConsumedCapacity` response - // field. 
- // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { - // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number, - // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" : - // { "CapacityUnits": number, "ReadCapacityUnits": number, - // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table": - // { "CapacityUnits": number, "ReadCapacityUnits": number, - // "WriteCapacityUnits": number }, "TableName": "string", - // "WriteCapacityUnits": number }' - AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") - - // AWSDynamoDBCountKey is the attribute Key conforming to the - // "aws.dynamodb.count" semantic conventions. It represents the value of - // the `Count` response parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 10 - AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") - - // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the - // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents - // the value of the `ExclusiveStartTableName` request parameter. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Users', 'CatsTable' - AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") - - // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key - // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic - // conventions. It represents the JSON-serialized value of each item in the - // `GlobalSecondaryIndexUpdates` request field. 
- // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { - // "AttributeName": "string", "KeyType": "string" } ], "Projection": { - // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, - // "ProvisionedThroughput": { "ReadCapacityUnits": number, - // "WriteCapacityUnits": number } }' - AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") - - // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to - // the "aws.dynamodb.global_secondary_indexes" semantic conventions. It - // represents the JSON-serialized value of each item of the - // `GlobalSecondaryIndexes` request field - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": - // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ - // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": { - // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }' - AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") - - // AWSDynamoDBIndexNameKey is the attribute Key conforming to the - // "aws.dynamodb.index_name" semantic conventions. It represents the value - // of the `IndexName` request parameter. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'name_to_group' - AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") - - // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to - // the "aws.dynamodb.item_collection_metrics" semantic conventions. It - // represents the JSON-serialized value of the `ItemCollectionMetrics` - // response field. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": - // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { - // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ], - // "NULL": boolean, "S": "string", "SS": [ "string" ] } }, - // "SizeEstimateRangeGB": [ number ] } ] }' - AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") - - // AWSDynamoDBLimitKey is the attribute Key conforming to the - // "aws.dynamodb.limit" semantic conventions. It represents the value of - // the `Limit` request parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 10 - AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") - - // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to - // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It - // represents the JSON-serialized value of each item of the - // `LocalSecondaryIndexes` request field. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '{ "IndexARN": "string", "IndexName": "string", - // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ { - // "AttributeName": "string", "KeyType": "string" } ], "Projection": { - // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }' - AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") - - // AWSDynamoDBProjectionKey is the attribute Key conforming to the - // "aws.dynamodb.projection" semantic conventions. It represents the value - // of the `ProjectionExpression` request parameter. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Title', 'Title, Price, Color', 'Title, Description, - // RelatedItems, ProductReviews' - AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") - - // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to - // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It - // represents the value of the `ProvisionedThroughput.ReadCapacityUnits` - // request parameter. - // - // Type: double - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1.0, 2.0 - AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") - - // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming - // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. - // It represents the value of the - // `ProvisionedThroughput.WriteCapacityUnits` request parameter. - // - // Type: double - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1.0, 2.0 - AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") - - // AWSDynamoDBScanForwardKey is the attribute Key conforming to the - // "aws.dynamodb.scan_forward" semantic conventions. It represents the - // value of the `ScanIndexForward` request parameter. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") - - // AWSDynamoDBScannedCountKey is the attribute Key conforming to the - // "aws.dynamodb.scanned_count" semantic conventions. It represents the - // value of the `ScannedCount` response parameter. 
- // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 50 - AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") - - // AWSDynamoDBSegmentKey is the attribute Key conforming to the - // "aws.dynamodb.segment" semantic conventions. It represents the value of - // the `Segment` request parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 10 - AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") - - // AWSDynamoDBSelectKey is the attribute Key conforming to the - // "aws.dynamodb.select" semantic conventions. It represents the value of - // the `Select` request parameter. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'ALL_ATTRIBUTES', 'COUNT' - AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") - - // AWSDynamoDBTableCountKey is the attribute Key conforming to the - // "aws.dynamodb.table_count" semantic conventions. It represents the - // number of items in the `TableNames` response parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 20 - AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") - - // AWSDynamoDBTableNamesKey is the attribute Key conforming to the - // "aws.dynamodb.table_names" semantic conventions. It represents the keys - // in the `RequestItems` object field. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Users', 'Cats' - AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") - - // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the - // "aws.dynamodb.total_segments" semantic conventions. It represents the - // value of the `TotalSegments` request parameter. 
- // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 100 - AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") -) - -// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming -// to the "aws.dynamodb.attribute_definitions" semantic conventions. It -// represents the JSON-serialized value of each item in the -// `AttributeDefinitions` request field. -func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue { - return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val) -} - -// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to -// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the -// value of the `AttributesToGet` request parameter. -func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue { - return AWSDynamoDBAttributesToGetKey.StringSlice(val) -} - -// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the -// "aws.dynamodb.consistent_read" semantic conventions. It represents the value -// of the `ConsistentRead` request parameter. -func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue { - return AWSDynamoDBConsistentReadKey.Bool(val) -} - -// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to -// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the -// JSON-serialized value of each item in the `ConsumedCapacity` response field. -func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue { - return AWSDynamoDBConsumedCapacityKey.StringSlice(val) -} - -// AWSDynamoDBCount returns an attribute KeyValue conforming to the -// "aws.dynamodb.count" semantic conventions. It represents the value of the -// `Count` response parameter. 
-func AWSDynamoDBCount(val int) attribute.KeyValue { - return AWSDynamoDBCountKey.Int(val) -} - -// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming -// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It -// represents the value of the `ExclusiveStartTableName` request parameter. -func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue { - return AWSDynamoDBExclusiveStartTableKey.String(val) -} - -// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue -// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic -// conventions. It represents the JSON-serialized value of each item in the -// `GlobalSecondaryIndexUpdates` request field. -func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue { - return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val) -} - -// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue -// conforming to the "aws.dynamodb.global_secondary_indexes" semantic -// conventions. It represents the JSON-serialized value of each item of the -// `GlobalSecondaryIndexes` request field -func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue { - return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val) -} - -// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the -// "aws.dynamodb.index_name" semantic conventions. It represents the value of -// the `IndexName` request parameter. -func AWSDynamoDBIndexName(val string) attribute.KeyValue { - return AWSDynamoDBIndexNameKey.String(val) -} - -// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming -// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It -// represents the JSON-serialized value of the `ItemCollectionMetrics` response -// field. 
-func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue { - return AWSDynamoDBItemCollectionMetricsKey.String(val) -} - -// AWSDynamoDBLimit returns an attribute KeyValue conforming to the -// "aws.dynamodb.limit" semantic conventions. It represents the value of the -// `Limit` request parameter. -func AWSDynamoDBLimit(val int) attribute.KeyValue { - return AWSDynamoDBLimitKey.Int(val) -} - -// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming -// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It -// represents the JSON-serialized value of each item of the -// `LocalSecondaryIndexes` request field. -func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue { - return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val) -} - -// AWSDynamoDBProjection returns an attribute KeyValue conforming to the -// "aws.dynamodb.projection" semantic conventions. It represents the value of -// the `ProjectionExpression` request parameter. -func AWSDynamoDBProjection(val string) attribute.KeyValue { - return AWSDynamoDBProjectionKey.String(val) -} - -// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue -// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic -// conventions. It represents the value of the -// `ProvisionedThroughput.ReadCapacityUnits` request parameter. -func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue { - return AWSDynamoDBProvisionedReadCapacityKey.Float64(val) -} - -// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue -// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic -// conventions. It represents the value of the -// `ProvisionedThroughput.WriteCapacityUnits` request parameter. 
-func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue { - return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val) -} - -// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the -// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of -// the `ScanIndexForward` request parameter. -func AWSDynamoDBScanForward(val bool) attribute.KeyValue { - return AWSDynamoDBScanForwardKey.Bool(val) -} - -// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the -// "aws.dynamodb.scanned_count" semantic conventions. It represents the value -// of the `ScannedCount` response parameter. -func AWSDynamoDBScannedCount(val int) attribute.KeyValue { - return AWSDynamoDBScannedCountKey.Int(val) -} - -// AWSDynamoDBSegment returns an attribute KeyValue conforming to the -// "aws.dynamodb.segment" semantic conventions. It represents the value of the -// `Segment` request parameter. -func AWSDynamoDBSegment(val int) attribute.KeyValue { - return AWSDynamoDBSegmentKey.Int(val) -} - -// AWSDynamoDBSelect returns an attribute KeyValue conforming to the -// "aws.dynamodb.select" semantic conventions. It represents the value of the -// `Select` request parameter. -func AWSDynamoDBSelect(val string) attribute.KeyValue { - return AWSDynamoDBSelectKey.String(val) -} - -// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the -// "aws.dynamodb.table_count" semantic conventions. It represents the number of -// items in the `TableNames` response parameter. -func AWSDynamoDBTableCount(val int) attribute.KeyValue { - return AWSDynamoDBTableCountKey.Int(val) -} - -// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the -// "aws.dynamodb.table_names" semantic conventions. It represents the keys in -// the `RequestItems` object field. 
-func AWSDynamoDBTableNames(val ...string) attribute.KeyValue { - return AWSDynamoDBTableNamesKey.StringSlice(val) -} - -// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the -// "aws.dynamodb.total_segments" semantic conventions. It represents the value -// of the `TotalSegments` request parameter. -func AWSDynamoDBTotalSegments(val int) attribute.KeyValue { - return AWSDynamoDBTotalSegmentsKey.Int(val) -} - -// Attributes for AWS Elastic Container Service (ECS). -const ( - // AWSECSTaskIDKey is the attribute Key conforming to the "aws.ecs.task.id" - // semantic conventions. It represents the ID of a running ECS task. The ID - // MUST be extracted from `task.arn`. - // - // Type: string - // RequirementLevel: ConditionallyRequired (If and only if `task.arn` is - // populated.) - // Stability: experimental - // Examples: '10838bed-421f-43ef-870a-f43feacbbb5b', - // '23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd' - AWSECSTaskIDKey = attribute.Key("aws.ecs.task.id") - - // AWSECSClusterARNKey is the attribute Key conforming to the - // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an - // [ECS - // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' - AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") - - // AWSECSContainerARNKey is the attribute Key conforming to the - // "aws.ecs.container.arn" semantic conventions. It represents the Amazon - // Resource Name (ARN) of an [ECS container - // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' - AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") - - // AWSECSLaunchtypeKey is the attribute Key conforming to the - // "aws.ecs.launchtype" semantic conventions. It represents the [launch - // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) - // for an ECS task. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") - - // AWSECSTaskARNKey is the attribute Key conforming to the - // "aws.ecs.task.arn" semantic conventions. It represents the ARN of a - // running [ECS - // task](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b', - // 'arn:aws:ecs:us-west-1:123456789123:task/my-cluster/task-id/23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd' - AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") - - // AWSECSTaskFamilyKey is the attribute Key conforming to the - // "aws.ecs.task.family" semantic conventions. It represents the family - // name of the [ECS task - // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html) - // used to create the ECS task. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry-family' - AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") - - // AWSECSTaskRevisionKey is the attribute Key conforming to the - // "aws.ecs.task.revision" semantic conventions. It represents the revision - // for the task definition used to create the ECS task. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '8', '26' - AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") -) - -var ( - // ec2 - AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") - // fargate - AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") -) - -// AWSECSTaskID returns an attribute KeyValue conforming to the -// "aws.ecs.task.id" semantic conventions. It represents the ID of a running -// ECS task. The ID MUST be extracted from `task.arn`. -func AWSECSTaskID(val string) attribute.KeyValue { - return AWSECSTaskIDKey.String(val) -} - -// AWSECSClusterARN returns an attribute KeyValue conforming to the -// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS -// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). -func AWSECSClusterARN(val string) attribute.KeyValue { - return AWSECSClusterARNKey.String(val) -} - -// AWSECSContainerARN returns an attribute KeyValue conforming to the -// "aws.ecs.container.arn" semantic conventions. It represents the Amazon -// Resource Name (ARN) of an [ECS container -// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). -func AWSECSContainerARN(val string) attribute.KeyValue { - return AWSECSContainerARNKey.String(val) -} - -// AWSECSTaskARN returns an attribute KeyValue conforming to the -// "aws.ecs.task.arn" semantic conventions. It represents the ARN of a running -// [ECS -// task](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids). -func AWSECSTaskARN(val string) attribute.KeyValue { - return AWSECSTaskARNKey.String(val) -} - -// AWSECSTaskFamily returns an attribute KeyValue conforming to the -// "aws.ecs.task.family" semantic conventions. 
It represents the family name of -// the [ECS task -// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html) -// used to create the ECS task. -func AWSECSTaskFamily(val string) attribute.KeyValue { - return AWSECSTaskFamilyKey.String(val) -} - -// AWSECSTaskRevision returns an attribute KeyValue conforming to the -// "aws.ecs.task.revision" semantic conventions. It represents the revision for -// the task definition used to create the ECS task. -func AWSECSTaskRevision(val string) attribute.KeyValue { - return AWSECSTaskRevisionKey.String(val) -} - -// Attributes for AWS Elastic Kubernetes Service (EKS). -const ( - // AWSEKSClusterARNKey is the attribute Key conforming to the - // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an - // EKS cluster. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' - AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") -) - -// AWSEKSClusterARN returns an attribute KeyValue conforming to the -// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS -// cluster. -func AWSEKSClusterARN(val string) attribute.KeyValue { - return AWSEKSClusterARNKey.String(val) -} - -// Attributes for AWS Logs. -const ( - // AWSLogGroupARNsKey is the attribute Key conforming to the - // "aws.log.group.arns" semantic conventions. It represents the Amazon - // Resource Name(s) (ARN) of the AWS log group(s). - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' - // Note: See the [log group ARN format - // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). 
- AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") - - // AWSLogGroupNamesKey is the attribute Key conforming to the - // "aws.log.group.names" semantic conventions. It represents the name(s) of - // the AWS log group(s) an application is writing to. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/aws/lambda/my-function', 'opentelemetry-service' - // Note: Multiple log groups must be supported for cases like - // multi-container applications, where a single application has sidecar - // containers, and each write to their own log group. - AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") - - // AWSLogStreamARNsKey is the attribute Key conforming to the - // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of - // the AWS log stream(s). - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' - // Note: See the [log stream ARN format - // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). - // One log group can contain several log streams, so these ARNs necessarily - // identify both a log group and a log stream. - AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") - - // AWSLogStreamNamesKey is the attribute Key conforming to the - // "aws.log.stream.names" semantic conventions. It represents the name(s) - // of the AWS log stream(s) an application is writing to. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' - AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") -) - -// AWSLogGroupARNs returns an attribute KeyValue conforming to the -// "aws.log.group.arns" semantic conventions. 
It represents the Amazon Resource -// Name(s) (ARN) of the AWS log group(s). -func AWSLogGroupARNs(val ...string) attribute.KeyValue { - return AWSLogGroupARNsKey.StringSlice(val) -} - -// AWSLogGroupNames returns an attribute KeyValue conforming to the -// "aws.log.group.names" semantic conventions. It represents the name(s) of the -// AWS log group(s) an application is writing to. -func AWSLogGroupNames(val ...string) attribute.KeyValue { - return AWSLogGroupNamesKey.StringSlice(val) -} - -// AWSLogStreamARNs returns an attribute KeyValue conforming to the -// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the -// AWS log stream(s). -func AWSLogStreamARNs(val ...string) attribute.KeyValue { - return AWSLogStreamARNsKey.StringSlice(val) -} - -// AWSLogStreamNames returns an attribute KeyValue conforming to the -// "aws.log.stream.names" semantic conventions. It represents the name(s) of -// the AWS log stream(s) an application is writing to. -func AWSLogStreamNames(val ...string) attribute.KeyValue { - return AWSLogStreamNamesKey.StringSlice(val) -} - -// Attributes for AWS Lambda. -const ( - // AWSLambdaInvokedARNKey is the attribute Key conforming to the - // "aws.lambda.invoked_arn" semantic conventions. It represents the full - // invoked ARN as provided on the `Context` passed to the function - // (`Lambda-Runtime-Invoked-Function-ARN` header on the - // `/runtime/invocation/next` applicable). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' - // Note: This may be different from `cloud.resource_id` if an alias is - // involved. - AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") -) - -// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the -// "aws.lambda.invoked_arn" semantic conventions. 
It represents the full -// invoked ARN as provided on the `Context` passed to the function -// (`Lambda-Runtime-Invoked-Function-ARN` header on the -// `/runtime/invocation/next` applicable). -func AWSLambdaInvokedARN(val string) attribute.KeyValue { - return AWSLambdaInvokedARNKey.String(val) -} - -// Attributes for AWS S3. -const ( - // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket" - // semantic conventions. It represents the S3 bucket name the request - // refers to. Corresponds to the `--bucket` parameter of the [S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) - // operations. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'some-bucket-name' - // Note: The `bucket` attribute is applicable to all S3 operations that - // reference a bucket, i.e. that require the bucket name as a mandatory - // parameter. - // This applies to almost all S3 operations except `list-buckets`. - AWSS3BucketKey = attribute.Key("aws.s3.bucket") - - // AWSS3CopySourceKey is the attribute Key conforming to the - // "aws.s3.copy_source" semantic conventions. It represents the source - // object (in the form `bucket`/`key`) for the copy operation. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'someFile.yml' - // Note: The `copy_source` attribute applies to S3 copy operations and - // corresponds to the `--copy-source` parameter - // of the [copy-object operation within the S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html). 
- // This applies in particular to the following operations: - // - // - - // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) - // - - // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) - AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source") - - // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete" - // semantic conventions. It represents the delete request container that - // specifies the objects to be deleted. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string}],Quiet=boolean' - // Note: The `delete` attribute is only applicable to the - // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) - // operation. - // The `delete` attribute corresponds to the `--delete` parameter of the - // [delete-objects operation within the S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html). - AWSS3DeleteKey = attribute.Key("aws.s3.delete") - - // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic - // conventions. It represents the S3 object key the request refers to. - // Corresponds to the `--key` parameter of the [S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) - // operations. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'someFile.yml' - // Note: The `key` attribute is applicable to all object-related S3 - // operations, i.e. that require the object key as a mandatory parameter. 
- // This applies in particular to the following operations: - // - // - - // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) - // - - // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) - // - - // [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html) - // - - // [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html) - // - - // [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html) - // - - // [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html) - // - - // [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html) - // - - // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) - // - - // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) - // - - // [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html) - // - - // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) - // - - // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) - // - - // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) - AWSS3KeyKey = attribute.Key("aws.s3.key") - - // AWSS3PartNumberKey is the attribute Key conforming to the - // "aws.s3.part_number" semantic conventions. It represents the part number - // of the part being uploaded in a multipart-upload operation. This is a - // positive integer between 1 and 10,000. 
- // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 3456 - // Note: The `part_number` attribute is only applicable to the - // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) - // and - // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) - // operations. - // The `part_number` attribute corresponds to the `--part-number` parameter - // of the - // [upload-part operation within the S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html). - AWSS3PartNumberKey = attribute.Key("aws.s3.part_number") - - // AWSS3UploadIDKey is the attribute Key conforming to the - // "aws.s3.upload_id" semantic conventions. It represents the upload ID - // that identifies the multipart upload. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ' - // Note: The `upload_id` attribute applies to S3 multipart-upload - // operations and corresponds to the `--upload-id` parameter - // of the [S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) - // multipart operations. 
- // This applies in particular to the following operations: - // - // - - // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) - // - - // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) - // - - // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) - // - - // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) - // - - // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) - AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id") -) - -// AWSS3Bucket returns an attribute KeyValue conforming to the -// "aws.s3.bucket" semantic conventions. It represents the S3 bucket name the -// request refers to. Corresponds to the `--bucket` parameter of the [S3 -// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) -// operations. -func AWSS3Bucket(val string) attribute.KeyValue { - return AWSS3BucketKey.String(val) -} - -// AWSS3CopySource returns an attribute KeyValue conforming to the -// "aws.s3.copy_source" semantic conventions. It represents the source object -// (in the form `bucket`/`key`) for the copy operation. -func AWSS3CopySource(val string) attribute.KeyValue { - return AWSS3CopySourceKey.String(val) -} - -// AWSS3Delete returns an attribute KeyValue conforming to the -// "aws.s3.delete" semantic conventions. It represents the delete request -// container that specifies the objects to be deleted. -func AWSS3Delete(val string) attribute.KeyValue { - return AWSS3DeleteKey.String(val) -} - -// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key" -// semantic conventions. It represents the S3 object key the request refers to. -// Corresponds to the `--key` parameter of the [S3 -// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) -// operations. 
-func AWSS3Key(val string) attribute.KeyValue { - return AWSS3KeyKey.String(val) -} - -// AWSS3PartNumber returns an attribute KeyValue conforming to the -// "aws.s3.part_number" semantic conventions. It represents the part number of -// the part being uploaded in a multipart-upload operation. This is a positive -// integer between 1 and 10,000. -func AWSS3PartNumber(val int) attribute.KeyValue { - return AWSS3PartNumberKey.Int(val) -} - -// AWSS3UploadID returns an attribute KeyValue conforming to the -// "aws.s3.upload_id" semantic conventions. It represents the upload ID that -// identifies the multipart upload. -func AWSS3UploadID(val string) attribute.KeyValue { - return AWSS3UploadIDKey.String(val) -} - -// The web browser attributes -const ( - // BrowserBrandsKey is the attribute Key conforming to the "browser.brands" - // semantic conventions. It represents the array of brand name and version - // separated by a space - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' - // Note: This value is intended to be taken from the [UA client hints - // API](https://wicg.github.io/ua-client-hints/#interface) - // (`navigator.userAgentData.brands`). - BrowserBrandsKey = attribute.Key("browser.brands") - - // BrowserLanguageKey is the attribute Key conforming to the - // "browser.language" semantic conventions. It represents the preferred - // language of the user using the browser - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'en', 'en-US', 'fr', 'fr-FR' - // Note: This value is intended to be taken from the Navigator API - // `navigator.language`. - BrowserLanguageKey = attribute.Key("browser.language") - - // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" - // semantic conventions. 
It represents a boolean that is true if the - // browser is running on a mobile device - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - // Note: This value is intended to be taken from the [UA client hints - // API](https://wicg.github.io/ua-client-hints/#interface) - // (`navigator.userAgentData.mobile`). If unavailable, this attribute - // SHOULD be left unset. - BrowserMobileKey = attribute.Key("browser.mobile") - - // BrowserPlatformKey is the attribute Key conforming to the - // "browser.platform" semantic conventions. It represents the platform on - // which the browser is running - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Windows', 'macOS', 'Android' - // Note: This value is intended to be taken from the [UA client hints - // API](https://wicg.github.io/ua-client-hints/#interface) - // (`navigator.userAgentData.platform`). If unavailable, the legacy - // `navigator.platform` API SHOULD NOT be used instead and this attribute - // SHOULD be left unset in order for the values to be consistent. - // The list of possible values is defined in the [W3C User-Agent Client - // Hints - // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). - // Note that some (but not all) of these values can overlap with values in - // the [`os.type` and `os.name` attributes](./os.md). However, for - // consistency, the values in the `browser.platform` attribute should - // capture the exact value that the user agent provides. - BrowserPlatformKey = attribute.Key("browser.platform") -) - -// BrowserBrands returns an attribute KeyValue conforming to the -// "browser.brands" semantic conventions. 
It represents the array of brand name -// and version separated by a space -func BrowserBrands(val ...string) attribute.KeyValue { - return BrowserBrandsKey.StringSlice(val) -} - -// BrowserLanguage returns an attribute KeyValue conforming to the -// "browser.language" semantic conventions. It represents the preferred -// language of the user using the browser -func BrowserLanguage(val string) attribute.KeyValue { - return BrowserLanguageKey.String(val) -} - -// BrowserMobile returns an attribute KeyValue conforming to the -// "browser.mobile" semantic conventions. It represents a boolean that is true -// if the browser is running on a mobile device -func BrowserMobile(val bool) attribute.KeyValue { - return BrowserMobileKey.Bool(val) -} - -// BrowserPlatform returns an attribute KeyValue conforming to the -// "browser.platform" semantic conventions. It represents the platform on which -// the browser is running -func BrowserPlatform(val string) attribute.KeyValue { - return BrowserPlatformKey.String(val) -} - -// These attributes may be used to describe the client in a connection-based -// network interaction where there is one side that initiates the connection -// (the client is the side that initiates the connection). This covers all TCP -// network interactions since TCP is connection-based and one side initiates -// the connection (an exception is made for peer-to-peer communication over TCP -// where the "user-facing" surface of the protocol / API doesn't expose a clear -// notion of client and server). This also covers UDP network interactions -// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS. -const ( - // ClientAddressKey is the attribute Key conforming to the "client.address" - // semantic conventions. It represents the client address - domain name if - // available without reverse DNS lookup; otherwise, IP address or Unix - // domain socket name. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'client.example.com', '10.1.2.80', '/tmp/my.sock' - // Note: When observed from the server side, and when communicating through - // an intermediary, `client.address` SHOULD represent the client address - // behind any intermediaries, for example proxies, if it's available. - ClientAddressKey = attribute.Key("client.address") - - // ClientPortKey is the attribute Key conforming to the "client.port" - // semantic conventions. It represents the client port number. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 65123 - // Note: When observed from the server side, and when communicating through - // an intermediary, `client.port` SHOULD represent the client port behind - // any intermediaries, for example proxies, if it's available. - ClientPortKey = attribute.Key("client.port") -) - -// ClientAddress returns an attribute KeyValue conforming to the -// "client.address" semantic conventions. It represents the client address - -// domain name if available without reverse DNS lookup; otherwise, IP address -// or Unix domain socket name. -func ClientAddress(val string) attribute.KeyValue { - return ClientAddressKey.String(val) -} - -// ClientPort returns an attribute KeyValue conforming to the "client.port" -// semantic conventions. It represents the client port number. -func ClientPort(val int) attribute.KeyValue { - return ClientPortKey.Int(val) -} - -// A cloud environment (e.g. GCP, Azure, AWS). -const ( - // CloudAccountIDKey is the attribute Key conforming to the - // "cloud.account.id" semantic conventions. It represents the cloud account - // ID the resource is assigned to. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '111111111111', 'opentelemetry' - CloudAccountIDKey = attribute.Key("cloud.account.id") - - // CloudAvailabilityZoneKey is the attribute Key conforming to the - // "cloud.availability_zone" semantic conventions. It represents the cloud - // regions often have multiple, isolated locations known as zones to - // increase availability. Availability zone represents the zone where the - // resource is running. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'us-east-1c' - // Note: Availability zones are called "zones" on Alibaba Cloud and Google - // Cloud. - CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") - - // CloudPlatformKey is the attribute Key conforming to the "cloud.platform" - // semantic conventions. It represents the cloud platform in use. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Note: The prefix of the service SHOULD match the one specified in - // `cloud.provider`. - CloudPlatformKey = attribute.Key("cloud.platform") - - // CloudProviderKey is the attribute Key conforming to the "cloud.provider" - // semantic conventions. It represents the name of the cloud provider. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - CloudProviderKey = attribute.Key("cloud.provider") - - // CloudRegionKey is the attribute Key conforming to the "cloud.region" - // semantic conventions. It represents the geographical region the resource - // is running. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'us-central1', 'us-east-1' - // Note: Refer to your provider's docs to see the available regions, for - // example [Alibaba Cloud - // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS - // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/), - // [Azure - // regions](https://azure.microsoft.com/global-infrastructure/geographies/), - // [Google Cloud regions](https://cloud.google.com/about/locations), or - // [Tencent Cloud - // regions](https://www.tencentcloud.com/document/product/213/6091). - CloudRegionKey = attribute.Key("cloud.region") - - // CloudResourceIDKey is the attribute Key conforming to the - // "cloud.resource_id" semantic conventions. It represents the cloud - // provider-specific native identifier of the monitored cloud resource - // (e.g. an - // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) - // on AWS, a [fully qualified resource - // ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id) - // on Azure, a [full resource - // name](https://cloud.google.com/apis/design/resource_names#full_resource_name) - // on GCP) - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function', - // '//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID', - // '/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/' - // Note: On some cloud providers, it may not be possible to determine the - // full ID at startup, - // so it may be necessary to set `cloud.resource_id` as a span attribute - // instead. - // - // The exact value to use for `cloud.resource_id` depends on the cloud - // provider. 
- // The following well-known definitions MUST be used if you set this - // attribute and they apply: - // - // * **AWS Lambda:** The function - // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). - // Take care not to use the "invoked ARN" directly but replace any - // [alias - // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html) - // with the resolved function version, as the same runtime instance may - // be invokable with - // multiple different aliases. - // * **GCP:** The [URI of the - // resource](https://cloud.google.com/iam/docs/full-resource-names) - // * **Azure:** The [Fully Qualified Resource - // ID](https://docs.microsoft.com/rest/api/resources/resources/get-by-id) - // of the invoked function, - // *not* the function app, having the form - // `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/`. - // This means that a span attribute MUST be used, as an Azure function - // app can host multiple functions that would usually share - // a TracerProvider. 
- CloudResourceIDKey = attribute.Key("cloud.resource_id") -) - -var ( - // Alibaba Cloud Elastic Compute Service - CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") - // Alibaba Cloud Function Compute - CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc") - // Red Hat OpenShift on Alibaba Cloud - CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift") - // AWS Elastic Compute Cloud - CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") - // AWS Elastic Container Service - CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") - // AWS Elastic Kubernetes Service - CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") - // AWS Lambda - CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") - // AWS Elastic Beanstalk - CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") - // AWS App Runner - CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") - // Red Hat OpenShift on AWS (ROSA) - CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift") - // Azure Virtual Machines - CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") - // Azure Container Apps - CloudPlatformAzureContainerApps = CloudPlatformKey.String("azure_container_apps") - // Azure Container Instances - CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") - // Azure Kubernetes Service - CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") - // Azure Functions - CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") - // Azure App Service - CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") - // Azure Red Hat OpenShift - CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift") - // Google Bare Metal Solution (BMS) - CloudPlatformGCPBareMetalSolution = CloudPlatformKey.String("gcp_bare_metal_solution") - // Google Cloud Compute Engine (GCE) - 
CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") - // Google Cloud Run - CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") - // Google Cloud Kubernetes Engine (GKE) - CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") - // Google Cloud Functions (GCF) - CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") - // Google Cloud App Engine (GAE) - CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") - // Red Hat OpenShift on Google Cloud - CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift") - // Red Hat OpenShift on IBM Cloud - CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift") - // Tencent Cloud Cloud Virtual Machine (CVM) - CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm") - // Tencent Cloud Elastic Kubernetes Service (EKS) - CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") - // Tencent Cloud Serverless Cloud Function (SCF) - CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf") -) - -var ( - // Alibaba Cloud - CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") - // Amazon Web Services - CloudProviderAWS = CloudProviderKey.String("aws") - // Microsoft Azure - CloudProviderAzure = CloudProviderKey.String("azure") - // Google Cloud Platform - CloudProviderGCP = CloudProviderKey.String("gcp") - // Heroku Platform as a Service - CloudProviderHeroku = CloudProviderKey.String("heroku") - // IBM Cloud - CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud") - // Tencent Cloud - CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud") -) - -// CloudAccountID returns an attribute KeyValue conforming to the -// "cloud.account.id" semantic conventions. It represents the cloud account ID -// the resource is assigned to. 
-func CloudAccountID(val string) attribute.KeyValue { - return CloudAccountIDKey.String(val) -} - -// CloudAvailabilityZone returns an attribute KeyValue conforming to the -// "cloud.availability_zone" semantic conventions. It represents the cloud -// regions often have multiple, isolated locations known as zones to increase -// availability. Availability zone represents the zone where the resource is -// running. -func CloudAvailabilityZone(val string) attribute.KeyValue { - return CloudAvailabilityZoneKey.String(val) -} - -// CloudRegion returns an attribute KeyValue conforming to the -// "cloud.region" semantic conventions. It represents the geographical region -// the resource is running. -func CloudRegion(val string) attribute.KeyValue { - return CloudRegionKey.String(val) -} - -// CloudResourceID returns an attribute KeyValue conforming to the -// "cloud.resource_id" semantic conventions. It represents the cloud -// provider-specific native identifier of the monitored cloud resource (e.g. an -// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) -// on AWS, a [fully qualified resource -// ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id) on -// Azure, a [full resource -// name](https://cloud.google.com/apis/design/resource_names#full_resource_name) -// on GCP) -func CloudResourceID(val string) attribute.KeyValue { - return CloudResourceIDKey.String(val) -} - -// Attributes for CloudEvents. -const ( - // CloudeventsEventIDKey is the attribute Key conforming to the - // "cloudevents.event_id" semantic conventions. It represents the - // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) - // uniquely identifies the event. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001' - CloudeventsEventIDKey = attribute.Key("cloudevents.event_id") - - // CloudeventsEventSourceKey is the attribute Key conforming to the - // "cloudevents.event_source" semantic conventions. It represents the - // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) - // identifies the context in which an event happened. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'https://github.com/cloudevents', - // '/cloudevents/spec/pull/123', 'my-service' - CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source") - - // CloudeventsEventSpecVersionKey is the attribute Key conforming to the - // "cloudevents.event_spec_version" semantic conventions. It represents the - // [version of the CloudEvents - // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) - // which the event uses. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1.0' - CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version") - - // CloudeventsEventSubjectKey is the attribute Key conforming to the - // "cloudevents.event_subject" semantic conventions. It represents the - // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) - // of the event in the context of the event producer (identified by - // source). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'mynewfile.jpg' - CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject") - - // CloudeventsEventTypeKey is the attribute Key conforming to the - // "cloudevents.event_type" semantic conventions. 
It represents the - // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) - // contains a value describing the type of event related to the originating - // occurrence. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'com.github.pull_request.opened', - // 'com.example.object.deleted.v2' - CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type") -) - -// CloudeventsEventID returns an attribute KeyValue conforming to the -// "cloudevents.event_id" semantic conventions. It represents the -// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) -// uniquely identifies the event. -func CloudeventsEventID(val string) attribute.KeyValue { - return CloudeventsEventIDKey.String(val) -} - -// CloudeventsEventSource returns an attribute KeyValue conforming to the -// "cloudevents.event_source" semantic conventions. It represents the -// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) -// identifies the context in which an event happened. -func CloudeventsEventSource(val string) attribute.KeyValue { - return CloudeventsEventSourceKey.String(val) -} - -// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to -// the "cloudevents.event_spec_version" semantic conventions. It represents the -// [version of the CloudEvents -// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) -// which the event uses. -func CloudeventsEventSpecVersion(val string) attribute.KeyValue { - return CloudeventsEventSpecVersionKey.String(val) -} - -// CloudeventsEventSubject returns an attribute KeyValue conforming to the -// "cloudevents.event_subject" semantic conventions. It represents the -// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) -// of the event in the context of the event producer (identified by source). 
-func CloudeventsEventSubject(val string) attribute.KeyValue { - return CloudeventsEventSubjectKey.String(val) -} - -// CloudeventsEventType returns an attribute KeyValue conforming to the -// "cloudevents.event_type" semantic conventions. It represents the -// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) -// contains a value describing the type of event related to the originating -// occurrence. -func CloudeventsEventType(val string) attribute.KeyValue { - return CloudeventsEventTypeKey.String(val) -} - -// These attributes allow to report this unit of code and therefore to provide -// more context about the span. -const ( - // CodeColumnKey is the attribute Key conforming to the "code.column" - // semantic conventions. It represents the column number in `code.filepath` - // best representing the operation. It SHOULD point within the code unit - // named in `code.function`. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 16 - CodeColumnKey = attribute.Key("code.column") - - // CodeFilepathKey is the attribute Key conforming to the "code.filepath" - // semantic conventions. It represents the source code file name that - // identifies the code unit as uniquely as possible (preferably an absolute - // file path). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/usr/local/MyApplication/content_root/app/index.php' - CodeFilepathKey = attribute.Key("code.filepath") - - // CodeFunctionKey is the attribute Key conforming to the "code.function" - // semantic conventions. It represents the method or function name, or - // equivalent (usually rightmost part of the code unit's name). 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'serveRequest' - CodeFunctionKey = attribute.Key("code.function") - - // CodeLineNumberKey is the attribute Key conforming to the "code.lineno" - // semantic conventions. It represents the line number in `code.filepath` - // best representing the operation. It SHOULD point within the code unit - // named in `code.function`. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 42 - CodeLineNumberKey = attribute.Key("code.lineno") - - // CodeNamespaceKey is the attribute Key conforming to the "code.namespace" - // semantic conventions. It represents the "namespace" within which - // `code.function` is defined. Usually the qualified class or module name, - // such that `code.namespace` + some separator + `code.function` form a - // unique identifier for the code unit. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'com.example.MyHTTPService' - CodeNamespaceKey = attribute.Key("code.namespace") - - // CodeStacktraceKey is the attribute Key conforming to the - // "code.stacktrace" semantic conventions. It represents a stacktrace as a - // string in the natural representation for the language runtime. The - // representation is to be determined and documented by each language SIG. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'at - // com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' - // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' - // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' - CodeStacktraceKey = attribute.Key("code.stacktrace") -) - -// CodeColumn returns an attribute KeyValue conforming to the "code.column" -// semantic conventions. It represents the column number in `code.filepath` -// best representing the operation. 
It SHOULD point within the code unit named -// in `code.function`. -func CodeColumn(val int) attribute.KeyValue { - return CodeColumnKey.Int(val) -} - -// CodeFilepath returns an attribute KeyValue conforming to the -// "code.filepath" semantic conventions. It represents the source code file -// name that identifies the code unit as uniquely as possible (preferably an -// absolute file path). -func CodeFilepath(val string) attribute.KeyValue { - return CodeFilepathKey.String(val) -} - -// CodeFunction returns an attribute KeyValue conforming to the -// "code.function" semantic conventions. It represents the method or function -// name, or equivalent (usually rightmost part of the code unit's name). -func CodeFunction(val string) attribute.KeyValue { - return CodeFunctionKey.String(val) -} - -// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno" -// semantic conventions. It represents the line number in `code.filepath` best -// representing the operation. It SHOULD point within the code unit named in -// `code.function`. -func CodeLineNumber(val int) attribute.KeyValue { - return CodeLineNumberKey.Int(val) -} - -// CodeNamespace returns an attribute KeyValue conforming to the -// "code.namespace" semantic conventions. It represents the "namespace" within -// which `code.function` is defined. Usually the qualified class or module -// name, such that `code.namespace` + some separator + `code.function` form a -// unique identifier for the code unit. -func CodeNamespace(val string) attribute.KeyValue { - return CodeNamespaceKey.String(val) -} - -// CodeStacktrace returns an attribute KeyValue conforming to the -// "code.stacktrace" semantic conventions. It represents a stacktrace as a -// string in the natural representation for the language runtime. The -// representation is to be determined and documented by each language SIG. 
-func CodeStacktrace(val string) attribute.KeyValue { - return CodeStacktraceKey.String(val) -} - -// A container instance. -const ( - // ContainerCommandKey is the attribute Key conforming to the - // "container.command" semantic conventions. It represents the command used - // to run the container (i.e. the command name). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'otelcontribcol' - // Note: If using embedded credentials or sensitive data, it is recommended - // to remove them to prevent potential leakage. - ContainerCommandKey = attribute.Key("container.command") - - // ContainerCommandArgsKey is the attribute Key conforming to the - // "container.command_args" semantic conventions. It represents the all the - // command arguments (including the command/executable itself) run by the - // container. [2] - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'otelcontribcol, --config, config.yaml' - ContainerCommandArgsKey = attribute.Key("container.command_args") - - // ContainerCommandLineKey is the attribute Key conforming to the - // "container.command_line" semantic conventions. It represents the full - // command run by the container as a single string representing the full - // command. [2] - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'otelcontribcol --config config.yaml' - ContainerCommandLineKey = attribute.Key("container.command_line") - - // ContainerCPUStateKey is the attribute Key conforming to the - // "container.cpu.state" semantic conventions. It represents the CPU state - // for this data point. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'user', 'kernel' - ContainerCPUStateKey = attribute.Key("container.cpu.state") - - // ContainerIDKey is the attribute Key conforming to the "container.id" - // semantic conventions. 
It represents the container ID. Usually a UUID, as - // for example used to [identify Docker - // containers](https://docs.docker.com/engine/reference/run/#container-identification). - // The UUID might be abbreviated. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'a3bf90e006b2' - ContainerIDKey = attribute.Key("container.id") - - // ContainerImageIDKey is the attribute Key conforming to the - // "container.image.id" semantic conventions. It represents the runtime - // specific image identifier. Usually a hash algorithm followed by a UUID. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f' - // Note: Docker defines a sha256 of the image id; `container.image.id` - // corresponds to the `Image` field from the Docker container inspect - // [API](https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerInspect) - // endpoint. - // K8S defines a link to the container registry repository with digest - // `"imageID": "registry.azurecr.io - // /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"`. - // The ID is assigned by the container runtime and can vary in different - // environments. Consider using `oci.manifest.digest` if it is important to - // identify the same image in different environments/runtimes. - ContainerImageIDKey = attribute.Key("container.image.id") - - // ContainerImageNameKey is the attribute Key conforming to the - // "container.image.name" semantic conventions. It represents the name of - // the image the container was built on. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'gcr.io/opentelemetry/operator' - ContainerImageNameKey = attribute.Key("container.image.name") - - // ContainerImageRepoDigestsKey is the attribute Key conforming to the - // "container.image.repo_digests" semantic conventions. It represents the - // repo digests of the container image as provided by the container - // runtime. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb', - // 'internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578' - // Note: - // [Docker](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect) - // and - // [CRI](https://github.com/kubernetes/cri-api/blob/c75ef5b473bbe2d0a4fc92f82235efd665ea8e9f/pkg/apis/runtime/v1/api.proto#L1237-L1238) - // report those under the `RepoDigests` field. - ContainerImageRepoDigestsKey = attribute.Key("container.image.repo_digests") - - // ContainerImageTagsKey is the attribute Key conforming to the - // "container.image.tags" semantic conventions. It represents the container - // image tags. An example can be found in [Docker Image - // Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect). - // Should be only the `` section of the full name for example from - // `registry.example.com/my-org/my-image:`. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'v1.27.1', '3.5.7-0' - ContainerImageTagsKey = attribute.Key("container.image.tags") - - // ContainerNameKey is the attribute Key conforming to the "container.name" - // semantic conventions. It represents the container name used by container - // runtime. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry-autoconf' - ContainerNameKey = attribute.Key("container.name") - - // ContainerRuntimeKey is the attribute Key conforming to the - // "container.runtime" semantic conventions. It represents the container - // runtime managing this container. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'docker', 'containerd', 'rkt' - ContainerRuntimeKey = attribute.Key("container.runtime") -) - -var ( - // When tasks of the cgroup are in user mode (Linux). When all container processes are in user mode (Windows) - ContainerCPUStateUser = ContainerCPUStateKey.String("user") - // When CPU is used by the system (host OS) - ContainerCPUStateSystem = ContainerCPUStateKey.String("system") - // When tasks of the cgroup are in kernel mode (Linux). When all container processes are in kernel mode (Windows) - ContainerCPUStateKernel = ContainerCPUStateKey.String("kernel") -) - -// ContainerCommand returns an attribute KeyValue conforming to the -// "container.command" semantic conventions. It represents the command used to -// run the container (i.e. the command name). -func ContainerCommand(val string) attribute.KeyValue { - return ContainerCommandKey.String(val) -} - -// ContainerCommandArgs returns an attribute KeyValue conforming to the -// "container.command_args" semantic conventions. It represents the all the -// command arguments (including the command/executable itself) run by the -// container. [2] -func ContainerCommandArgs(val ...string) attribute.KeyValue { - return ContainerCommandArgsKey.StringSlice(val) -} - -// ContainerCommandLine returns an attribute KeyValue conforming to the -// "container.command_line" semantic conventions. It represents the full -// command run by the container as a single string representing the full -// command. 
[2] -func ContainerCommandLine(val string) attribute.KeyValue { - return ContainerCommandLineKey.String(val) -} - -// ContainerID returns an attribute KeyValue conforming to the -// "container.id" semantic conventions. It represents the container ID. Usually -// a UUID, as for example used to [identify Docker -// containers](https://docs.docker.com/engine/reference/run/#container-identification). -// The UUID might be abbreviated. -func ContainerID(val string) attribute.KeyValue { - return ContainerIDKey.String(val) -} - -// ContainerImageID returns an attribute KeyValue conforming to the -// "container.image.id" semantic conventions. It represents the runtime -// specific image identifier. Usually a hash algorithm followed by a UUID. -func ContainerImageID(val string) attribute.KeyValue { - return ContainerImageIDKey.String(val) -} - -// ContainerImageName returns an attribute KeyValue conforming to the -// "container.image.name" semantic conventions. It represents the name of the -// image the container was built on. -func ContainerImageName(val string) attribute.KeyValue { - return ContainerImageNameKey.String(val) -} - -// ContainerImageRepoDigests returns an attribute KeyValue conforming to the -// "container.image.repo_digests" semantic conventions. It represents the repo -// digests of the container image as provided by the container runtime. -func ContainerImageRepoDigests(val ...string) attribute.KeyValue { - return ContainerImageRepoDigestsKey.StringSlice(val) -} - -// ContainerImageTags returns an attribute KeyValue conforming to the -// "container.image.tags" semantic conventions. It represents the container -// image tags. An example can be found in [Docker Image -// Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect). -// Should be only the `` section of the full name for example from -// `registry.example.com/my-org/my-image:`. 
-func ContainerImageTags(val ...string) attribute.KeyValue { - return ContainerImageTagsKey.StringSlice(val) -} - -// ContainerName returns an attribute KeyValue conforming to the -// "container.name" semantic conventions. It represents the container name used -// by container runtime. -func ContainerName(val string) attribute.KeyValue { - return ContainerNameKey.String(val) -} - -// ContainerRuntime returns an attribute KeyValue conforming to the -// "container.runtime" semantic conventions. It represents the container -// runtime managing this container. -func ContainerRuntime(val string) attribute.KeyValue { - return ContainerRuntimeKey.String(val) -} - -// This group defines the attributes used to describe telemetry in the context -// of databases. -const ( - // DBClientConnectionsPoolNameKey is the attribute Key conforming to the - // "db.client.connections.pool.name" semantic conventions. It represents - // the name of the connection pool; unique within the instrumented - // application. In case the connection pool implementation doesn't provide - // a name, instrumentation should use a combination of `server.address` and - // `server.port` attributes formatted as `server.address:server.port`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myDataSource' - DBClientConnectionsPoolNameKey = attribute.Key("db.client.connections.pool.name") - - // DBClientConnectionsStateKey is the attribute Key conforming to the - // "db.client.connections.state" semantic conventions. It represents the - // state of a connection in the pool - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'idle' - DBClientConnectionsStateKey = attribute.Key("db.client.connections.state") - - // DBCollectionNameKey is the attribute Key conforming to the - // "db.collection.name" semantic conventions. It represents the name of a - // collection (table, container) within the database. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'public.users', 'customers' - // Note: If the collection name is parsed from the query, it SHOULD match - // the value provided in the query and may be qualified with the schema and - // database name. - // It is RECOMMENDED to capture the value as provided by the application - // without attempting to do any case normalization. - DBCollectionNameKey = attribute.Key("db.collection.name") - - // DBNamespaceKey is the attribute Key conforming to the "db.namespace" - // semantic conventions. It represents the name of the database, fully - // qualified within the server address and port. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'customers', 'test.users' - // Note: If a database system has multiple namespace components, they - // SHOULD be concatenated (potentially using database system specific - // conventions) from most general to most specific namespace component, and - // more specific namespaces SHOULD NOT be captured without the more general - // namespaces, to ensure that "startswith" queries for the more general - // namespaces will be valid. - // Semantic conventions for individual database systems SHOULD document - // what `db.namespace` means in the context of that system. - // It is RECOMMENDED to capture the value as provided by the application - // without attempting to do any case normalization. - DBNamespaceKey = attribute.Key("db.namespace") - - // DBOperationNameKey is the attribute Key conforming to the - // "db.operation.name" semantic conventions. It represents the name of the - // operation or command being executed. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'findAndModify', 'HMSET', 'SELECT' - // Note: It is RECOMMENDED to capture the value as provided by the - // application without attempting to do any case normalization. 
- DBOperationNameKey = attribute.Key("db.operation.name") - - // DBQueryTextKey is the attribute Key conforming to the "db.query.text" - // semantic conventions. It represents the database query being executed. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'SELECT * FROM wuser_table where username = ?', 'SET mykey - // "WuValue"' - DBQueryTextKey = attribute.Key("db.query.text") - - // DBSystemKey is the attribute Key conforming to the "db.system" semantic - // conventions. It represents the database management system (DBMS) product - // as identified by the client instrumentation. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Note: The actual DBMS may differ from the one identified by the client. - // For example, when using PostgreSQL client libraries to connect to a - // CockroachDB, the `db.system` is set to `postgresql` based on the - // instrumentation's best knowledge. - DBSystemKey = attribute.Key("db.system") -) - -var ( - // idle - DBClientConnectionsStateIdle = DBClientConnectionsStateKey.String("idle") - // used - DBClientConnectionsStateUsed = DBClientConnectionsStateKey.String("used") -) - -var ( - // Some other SQL database. Fallback only. 
See notes - DBSystemOtherSQL = DBSystemKey.String("other_sql") - // Microsoft SQL Server - DBSystemMSSQL = DBSystemKey.String("mssql") - // Microsoft SQL Server Compact - DBSystemMssqlcompact = DBSystemKey.String("mssqlcompact") - // MySQL - DBSystemMySQL = DBSystemKey.String("mysql") - // Oracle Database - DBSystemOracle = DBSystemKey.String("oracle") - // IBM DB2 - DBSystemDB2 = DBSystemKey.String("db2") - // PostgreSQL - DBSystemPostgreSQL = DBSystemKey.String("postgresql") - // Amazon Redshift - DBSystemRedshift = DBSystemKey.String("redshift") - // Apache Hive - DBSystemHive = DBSystemKey.String("hive") - // Cloudscape - DBSystemCloudscape = DBSystemKey.String("cloudscape") - // HyperSQL DataBase - DBSystemHSQLDB = DBSystemKey.String("hsqldb") - // Progress Database - DBSystemProgress = DBSystemKey.String("progress") - // SAP MaxDB - DBSystemMaxDB = DBSystemKey.String("maxdb") - // SAP HANA - DBSystemHanaDB = DBSystemKey.String("hanadb") - // Ingres - DBSystemIngres = DBSystemKey.String("ingres") - // FirstSQL - DBSystemFirstSQL = DBSystemKey.String("firstsql") - // EnterpriseDB - DBSystemEDB = DBSystemKey.String("edb") - // InterSystems Caché - DBSystemCache = DBSystemKey.String("cache") - // Adabas (Adaptable Database System) - DBSystemAdabas = DBSystemKey.String("adabas") - // Firebird - DBSystemFirebird = DBSystemKey.String("firebird") - // Apache Derby - DBSystemDerby = DBSystemKey.String("derby") - // FileMaker - DBSystemFilemaker = DBSystemKey.String("filemaker") - // Informix - DBSystemInformix = DBSystemKey.String("informix") - // InstantDB - DBSystemInstantDB = DBSystemKey.String("instantdb") - // InterBase - DBSystemInterbase = DBSystemKey.String("interbase") - // MariaDB - DBSystemMariaDB = DBSystemKey.String("mariadb") - // Netezza - DBSystemNetezza = DBSystemKey.String("netezza") - // Pervasive PSQL - DBSystemPervasive = DBSystemKey.String("pervasive") - // PointBase - DBSystemPointbase = DBSystemKey.String("pointbase") - // SQLite - 
DBSystemSqlite = DBSystemKey.String("sqlite") - // Sybase - DBSystemSybase = DBSystemKey.String("sybase") - // Teradata - DBSystemTeradata = DBSystemKey.String("teradata") - // Vertica - DBSystemVertica = DBSystemKey.String("vertica") - // H2 - DBSystemH2 = DBSystemKey.String("h2") - // ColdFusion IMQ - DBSystemColdfusion = DBSystemKey.String("coldfusion") - // Apache Cassandra - DBSystemCassandra = DBSystemKey.String("cassandra") - // Apache HBase - DBSystemHBase = DBSystemKey.String("hbase") - // MongoDB - DBSystemMongoDB = DBSystemKey.String("mongodb") - // Redis - DBSystemRedis = DBSystemKey.String("redis") - // Couchbase - DBSystemCouchbase = DBSystemKey.String("couchbase") - // CouchDB - DBSystemCouchDB = DBSystemKey.String("couchdb") - // Microsoft Azure Cosmos DB - DBSystemCosmosDB = DBSystemKey.String("cosmosdb") - // Amazon DynamoDB - DBSystemDynamoDB = DBSystemKey.String("dynamodb") - // Neo4j - DBSystemNeo4j = DBSystemKey.String("neo4j") - // Apache Geode - DBSystemGeode = DBSystemKey.String("geode") - // Elasticsearch - DBSystemElasticsearch = DBSystemKey.String("elasticsearch") - // Memcached - DBSystemMemcached = DBSystemKey.String("memcached") - // CockroachDB - DBSystemCockroachdb = DBSystemKey.String("cockroachdb") - // OpenSearch - DBSystemOpensearch = DBSystemKey.String("opensearch") - // ClickHouse - DBSystemClickhouse = DBSystemKey.String("clickhouse") - // Cloud Spanner - DBSystemSpanner = DBSystemKey.String("spanner") - // Trino - DBSystemTrino = DBSystemKey.String("trino") -) - -// DBClientConnectionsPoolName returns an attribute KeyValue conforming to -// the "db.client.connections.pool.name" semantic conventions. It represents -// the name of the connection pool; unique within the instrumented application. -// In case the connection pool implementation doesn't provide a name, -// instrumentation should use a combination of `server.address` and -// `server.port` attributes formatted as `server.address:server.port`. 
-func DBClientConnectionsPoolName(val string) attribute.KeyValue { - return DBClientConnectionsPoolNameKey.String(val) -} - -// DBCollectionName returns an attribute KeyValue conforming to the -// "db.collection.name" semantic conventions. It represents the name of a -// collection (table, container) within the database. -func DBCollectionName(val string) attribute.KeyValue { - return DBCollectionNameKey.String(val) -} - -// DBNamespace returns an attribute KeyValue conforming to the -// "db.namespace" semantic conventions. It represents the name of the database, -// fully qualified within the server address and port. -func DBNamespace(val string) attribute.KeyValue { - return DBNamespaceKey.String(val) -} - -// DBOperationName returns an attribute KeyValue conforming to the -// "db.operation.name" semantic conventions. It represents the name of the -// operation or command being executed. -func DBOperationName(val string) attribute.KeyValue { - return DBOperationNameKey.String(val) -} - -// DBQueryText returns an attribute KeyValue conforming to the -// "db.query.text" semantic conventions. It represents the database query being -// executed. -func DBQueryText(val string) attribute.KeyValue { - return DBQueryTextKey.String(val) -} - -// This group defines attributes for Cassandra. -const ( - // DBCassandraConsistencyLevelKey is the attribute Key conforming to the - // "db.cassandra.consistency_level" semantic conventions. It represents the - // consistency level of the query. Based on consistency values from - // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html). - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level") - - // DBCassandraCoordinatorDCKey is the attribute Key conforming to the - // "db.cassandra.coordinator.dc" semantic conventions. 
It represents the - // data center of the coordinating node for a query. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'us-west-2' - DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc") - - // DBCassandraCoordinatorIDKey is the attribute Key conforming to the - // "db.cassandra.coordinator.id" semantic conventions. It represents the ID - // of the coordinating node for a query. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' - DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id") - - // DBCassandraIdempotenceKey is the attribute Key conforming to the - // "db.cassandra.idempotence" semantic conventions. It represents the - // whether or not the query is idempotent. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence") - - // DBCassandraPageSizeKey is the attribute Key conforming to the - // "db.cassandra.page_size" semantic conventions. It represents the fetch - // size used for paging, i.e. how many rows will be returned at once. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 5000 - DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size") - - // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming - // to the "db.cassandra.speculative_execution_count" semantic conventions. - // It represents the number of times a query was speculatively executed. - // Not set or `0` if the query was not executed speculatively. 
- // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 0, 2 - DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count") -) - -var ( - // all - DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all") - // each_quorum - DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum") - // quorum - DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum") - // local_quorum - DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum") - // one - DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one") - // two - DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two") - // three - DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three") - // local_one - DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one") - // any - DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any") - // serial - DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial") - // local_serial - DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial") -) - -// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the -// "db.cassandra.coordinator.dc" semantic conventions. It represents the data -// center of the coordinating node for a query. -func DBCassandraCoordinatorDC(val string) attribute.KeyValue { - return DBCassandraCoordinatorDCKey.String(val) -} - -// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the -// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of -// the coordinating node for a query. 
-func DBCassandraCoordinatorID(val string) attribute.KeyValue { - return DBCassandraCoordinatorIDKey.String(val) -} - -// DBCassandraIdempotence returns an attribute KeyValue conforming to the -// "db.cassandra.idempotence" semantic conventions. It represents the whether -// or not the query is idempotent. -func DBCassandraIdempotence(val bool) attribute.KeyValue { - return DBCassandraIdempotenceKey.Bool(val) -} - -// DBCassandraPageSize returns an attribute KeyValue conforming to the -// "db.cassandra.page_size" semantic conventions. It represents the fetch size -// used for paging, i.e. how many rows will be returned at once. -func DBCassandraPageSize(val int) attribute.KeyValue { - return DBCassandraPageSizeKey.Int(val) -} - -// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue -// conforming to the "db.cassandra.speculative_execution_count" semantic -// conventions. It represents the number of times a query was speculatively -// executed. Not set or `0` if the query was not executed speculatively. -func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue { - return DBCassandraSpeculativeExecutionCountKey.Int(val) -} - -// This group defines attributes for Azure Cosmos DB. -const ( - // DBCosmosDBClientIDKey is the attribute Key conforming to the - // "db.cosmosdb.client_id" semantic conventions. It represents the unique - // Cosmos client instance id. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '3ba4827d-4422-483f-b59f-85b74211c11d' - DBCosmosDBClientIDKey = attribute.Key("db.cosmosdb.client_id") - - // DBCosmosDBConnectionModeKey is the attribute Key conforming to the - // "db.cosmosdb.connection_mode" semantic conventions. It represents the - // cosmos client connection mode. 
- // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - DBCosmosDBConnectionModeKey = attribute.Key("db.cosmosdb.connection_mode") - - // DBCosmosDBOperationTypeKey is the attribute Key conforming to the - // "db.cosmosdb.operation_type" semantic conventions. It represents the - // cosmosDB Operation Type. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - DBCosmosDBOperationTypeKey = attribute.Key("db.cosmosdb.operation_type") - - // DBCosmosDBRequestChargeKey is the attribute Key conforming to the - // "db.cosmosdb.request_charge" semantic conventions. It represents the rU - // consumed for that operation - // - // Type: double - // RequirementLevel: Optional - // Stability: experimental - // Examples: 46.18, 1.0 - DBCosmosDBRequestChargeKey = attribute.Key("db.cosmosdb.request_charge") - - // DBCosmosDBRequestContentLengthKey is the attribute Key conforming to the - // "db.cosmosdb.request_content_length" semantic conventions. It represents - // the request payload size in bytes - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - DBCosmosDBRequestContentLengthKey = attribute.Key("db.cosmosdb.request_content_length") - - // DBCosmosDBStatusCodeKey is the attribute Key conforming to the - // "db.cosmosdb.status_code" semantic conventions. It represents the cosmos - // DB status code. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 200, 201 - DBCosmosDBStatusCodeKey = attribute.Key("db.cosmosdb.status_code") - - // DBCosmosDBSubStatusCodeKey is the attribute Key conforming to the - // "db.cosmosdb.sub_status_code" semantic conventions. It represents the - // cosmos DB sub status code. 
- // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1000, 1002 - DBCosmosDBSubStatusCodeKey = attribute.Key("db.cosmosdb.sub_status_code") -) - -var ( - // Gateway (HTTP) connections mode - DBCosmosDBConnectionModeGateway = DBCosmosDBConnectionModeKey.String("gateway") - // Direct connection - DBCosmosDBConnectionModeDirect = DBCosmosDBConnectionModeKey.String("direct") -) - -var ( - // invalid - DBCosmosDBOperationTypeInvalid = DBCosmosDBOperationTypeKey.String("Invalid") - // create - DBCosmosDBOperationTypeCreate = DBCosmosDBOperationTypeKey.String("Create") - // patch - DBCosmosDBOperationTypePatch = DBCosmosDBOperationTypeKey.String("Patch") - // read - DBCosmosDBOperationTypeRead = DBCosmosDBOperationTypeKey.String("Read") - // read_feed - DBCosmosDBOperationTypeReadFeed = DBCosmosDBOperationTypeKey.String("ReadFeed") - // delete - DBCosmosDBOperationTypeDelete = DBCosmosDBOperationTypeKey.String("Delete") - // replace - DBCosmosDBOperationTypeReplace = DBCosmosDBOperationTypeKey.String("Replace") - // execute - DBCosmosDBOperationTypeExecute = DBCosmosDBOperationTypeKey.String("Execute") - // query - DBCosmosDBOperationTypeQuery = DBCosmosDBOperationTypeKey.String("Query") - // head - DBCosmosDBOperationTypeHead = DBCosmosDBOperationTypeKey.String("Head") - // head_feed - DBCosmosDBOperationTypeHeadFeed = DBCosmosDBOperationTypeKey.String("HeadFeed") - // upsert - DBCosmosDBOperationTypeUpsert = DBCosmosDBOperationTypeKey.String("Upsert") - // batch - DBCosmosDBOperationTypeBatch = DBCosmosDBOperationTypeKey.String("Batch") - // query_plan - DBCosmosDBOperationTypeQueryPlan = DBCosmosDBOperationTypeKey.String("QueryPlan") - // execute_javascript - DBCosmosDBOperationTypeExecuteJavascript = DBCosmosDBOperationTypeKey.String("ExecuteJavaScript") -) - -// DBCosmosDBClientID returns an attribute KeyValue conforming to the -// "db.cosmosdb.client_id" semantic conventions. 
It represents the unique -// Cosmos client instance id. -func DBCosmosDBClientID(val string) attribute.KeyValue { - return DBCosmosDBClientIDKey.String(val) -} - -// DBCosmosDBRequestCharge returns an attribute KeyValue conforming to the -// "db.cosmosdb.request_charge" semantic conventions. It represents the rU -// consumed for that operation -func DBCosmosDBRequestCharge(val float64) attribute.KeyValue { - return DBCosmosDBRequestChargeKey.Float64(val) -} - -// DBCosmosDBRequestContentLength returns an attribute KeyValue conforming -// to the "db.cosmosdb.request_content_length" semantic conventions. It -// represents the request payload size in bytes -func DBCosmosDBRequestContentLength(val int) attribute.KeyValue { - return DBCosmosDBRequestContentLengthKey.Int(val) -} - -// DBCosmosDBStatusCode returns an attribute KeyValue conforming to the -// "db.cosmosdb.status_code" semantic conventions. It represents the cosmos DB -// status code. -func DBCosmosDBStatusCode(val int) attribute.KeyValue { - return DBCosmosDBStatusCodeKey.Int(val) -} - -// DBCosmosDBSubStatusCode returns an attribute KeyValue conforming to the -// "db.cosmosdb.sub_status_code" semantic conventions. It represents the cosmos -// DB sub status code. -func DBCosmosDBSubStatusCode(val int) attribute.KeyValue { - return DBCosmosDBSubStatusCodeKey.Int(val) -} - -// This group defines attributes for Elasticsearch. -const ( - // DBElasticsearchClusterNameKey is the attribute Key conforming to the - // "db.elasticsearch.cluster.name" semantic conventions. It represents the - // represents the identifier of an Elasticsearch cluster. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'e9106fc68e3044f0b1475b04bf4ffd5f' - DBElasticsearchClusterNameKey = attribute.Key("db.elasticsearch.cluster.name") - - // DBElasticsearchNodeNameKey is the attribute Key conforming to the - // "db.elasticsearch.node.name" semantic conventions. 
It represents the - // represents the human-readable identifier of the node/instance to which a - // request was routed. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'instance-0000000001' - DBElasticsearchNodeNameKey = attribute.Key("db.elasticsearch.node.name") -) - -// DBElasticsearchClusterName returns an attribute KeyValue conforming to -// the "db.elasticsearch.cluster.name" semantic conventions. It represents the -// represents the identifier of an Elasticsearch cluster. -func DBElasticsearchClusterName(val string) attribute.KeyValue { - return DBElasticsearchClusterNameKey.String(val) -} - -// DBElasticsearchNodeName returns an attribute KeyValue conforming to the -// "db.elasticsearch.node.name" semantic conventions. It represents the -// represents the human-readable identifier of the node/instance to which a -// request was routed. -func DBElasticsearchNodeName(val string) attribute.KeyValue { - return DBElasticsearchNodeNameKey.String(val) -} - -// Attributes for software deployments. -const ( - // DeploymentEnvironmentKey is the attribute Key conforming to the - // "deployment.environment" semantic conventions. It represents the name of - // the [deployment - // environment](https://wikipedia.org/wiki/Deployment_environment) (aka - // deployment tier). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'staging', 'production' - // Note: `deployment.environment` does not affect the uniqueness - // constraints defined through - // the `service.namespace`, `service.name` and `service.instance.id` - // resource attributes. - // This implies that resources carrying the following attribute - // combinations MUST be - // considered to be identifying the same service: - // - // * `service.name=frontend`, `deployment.environment=production` - // * `service.name=frontend`, `deployment.environment=staging`. 
- DeploymentEnvironmentKey = attribute.Key("deployment.environment") -) - -// DeploymentEnvironment returns an attribute KeyValue conforming to the -// "deployment.environment" semantic conventions. It represents the name of the -// [deployment environment](https://wikipedia.org/wiki/Deployment_environment) -// (aka deployment tier). -func DeploymentEnvironment(val string) attribute.KeyValue { - return DeploymentEnvironmentKey.String(val) -} - -// Attributes that represents an occurrence of a lifecycle transition on the -// Android platform. -const ( - // AndroidStateKey is the attribute Key conforming to the "android.state" - // semantic conventions. It represents the deprecated use the - // `device.app.lifecycle` event definition including `android.state` as a - // payload field instead. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Note: The Android lifecycle states are defined in [Activity lifecycle - // callbacks](https://developer.android.com/guide/components/activities/activity-lifecycle#lc), - // and from which the `OS identifiers` are derived. - AndroidStateKey = attribute.Key("android.state") -) - -var ( - // Any time before Activity.onResume() or, if the app has no Activity, Context.startService() has been called in the app for the first time - AndroidStateCreated = AndroidStateKey.String("created") - // Any time after Activity.onPause() or, if the app has no Activity, Context.stopService() has been called when the app was in the foreground state - AndroidStateBackground = AndroidStateKey.String("background") - // Any time after Activity.onResume() or, if the app has no Activity, Context.startService() has been called when the app was in either the created or background states - AndroidStateForeground = AndroidStateKey.String("foreground") -) - -// These attributes may be used to describe the receiver of a network -// exchange/packet. 
These should be used when there is no client/server -// relationship between the two sides, or when that relationship is unknown. -// This covers low-level network interactions (e.g. packet tracing) where you -// don't know if there was a connection or which side initiated it. This also -// covers unidirectional UDP flows and peer-to-peer communication where the -// "user-facing" surface of the protocol / API doesn't expose a clear notion of -// client and server. -const ( - // DestinationAddressKey is the attribute Key conforming to the - // "destination.address" semantic conventions. It represents the - // destination address - domain name if available without reverse DNS - // lookup; otherwise, IP address or Unix domain socket name. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'destination.example.com', '10.1.2.80', '/tmp/my.sock' - // Note: When observed from the source side, and when communicating through - // an intermediary, `destination.address` SHOULD represent the destination - // address behind any intermediaries, for example proxies, if it's - // available. - DestinationAddressKey = attribute.Key("destination.address") - - // DestinationPortKey is the attribute Key conforming to the - // "destination.port" semantic conventions. It represents the destination - // port number - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 3389, 2888 - DestinationPortKey = attribute.Key("destination.port") -) - -// DestinationAddress returns an attribute KeyValue conforming to the -// "destination.address" semantic conventions. It represents the destination -// address - domain name if available without reverse DNS lookup; otherwise, IP -// address or Unix domain socket name. 
-func DestinationAddress(val string) attribute.KeyValue { - return DestinationAddressKey.String(val) -} - -// DestinationPort returns an attribute KeyValue conforming to the -// "destination.port" semantic conventions. It represents the destination port -// number -func DestinationPort(val int) attribute.KeyValue { - return DestinationPortKey.Int(val) -} - -// Describes device attributes. -const ( - // DeviceIDKey is the attribute Key conforming to the "device.id" semantic - // conventions. It represents a unique identifier representing the device - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' - // Note: The device identifier MUST only be defined using the values - // outlined below. This value is not an advertising identifier and MUST NOT - // be used as such. On iOS (Swift or Objective-C), this value MUST be equal - // to the [vendor - // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor). - // On Android (Java or Kotlin), this value MUST be equal to the Firebase - // Installation ID or a globally unique UUID which is persisted across - // sessions in your application. More information can be found - // [here](https://developer.android.com/training/articles/user-data-ids) on - // best practices and exact implementation details. Caution should be taken - // when storing personal data or anything which can identify a user. GDPR - // and data protection laws may apply, ensure you do your own due - // diligence. - DeviceIDKey = attribute.Key("device.id") - - // DeviceManufacturerKey is the attribute Key conforming to the - // "device.manufacturer" semantic conventions. 
It represents the name of - // the device manufacturer - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Apple', 'Samsung' - // Note: The Android OS provides this field via - // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). - // iOS apps SHOULD hardcode the value `Apple`. - DeviceManufacturerKey = attribute.Key("device.manufacturer") - - // DeviceModelIdentifierKey is the attribute Key conforming to the - // "device.model.identifier" semantic conventions. It represents the model - // identifier for the device - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'iPhone3,4', 'SM-G920F' - // Note: It's recommended this value represents a machine-readable version - // of the model identifier rather than the market or consumer-friendly name - // of the device. - DeviceModelIdentifierKey = attribute.Key("device.model.identifier") - - // DeviceModelNameKey is the attribute Key conforming to the - // "device.model.name" semantic conventions. It represents the marketing - // name for the device model - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' - // Note: It's recommended this value represents a human-readable version of - // the device model rather than a machine-readable alternative. - DeviceModelNameKey = attribute.Key("device.model.name") -) - -// DeviceID returns an attribute KeyValue conforming to the "device.id" -// semantic conventions. It represents a unique identifier representing the -// device -func DeviceID(val string) attribute.KeyValue { - return DeviceIDKey.String(val) -} - -// DeviceManufacturer returns an attribute KeyValue conforming to the -// "device.manufacturer" semantic conventions. 
It represents the name of the -// device manufacturer -func DeviceManufacturer(val string) attribute.KeyValue { - return DeviceManufacturerKey.String(val) -} - -// DeviceModelIdentifier returns an attribute KeyValue conforming to the -// "device.model.identifier" semantic conventions. It represents the model -// identifier for the device -func DeviceModelIdentifier(val string) attribute.KeyValue { - return DeviceModelIdentifierKey.String(val) -} - -// DeviceModelName returns an attribute KeyValue conforming to the -// "device.model.name" semantic conventions. It represents the marketing name -// for the device model -func DeviceModelName(val string) attribute.KeyValue { - return DeviceModelNameKey.String(val) -} - -// These attributes may be used for any disk related operation. -const ( - // DiskIoDirectionKey is the attribute Key conforming to the - // "disk.io.direction" semantic conventions. It represents the disk IO - // operation direction. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'read' - DiskIoDirectionKey = attribute.Key("disk.io.direction") -) - -var ( - // read - DiskIoDirectionRead = DiskIoDirectionKey.String("read") - // write - DiskIoDirectionWrite = DiskIoDirectionKey.String("write") -) - -// The shared attributes used to report a DNS query. -const ( - // DNSQuestionNameKey is the attribute Key conforming to the - // "dns.question.name" semantic conventions. It represents the name being - // queried. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'www.example.com', 'opentelemetry.io' - // Note: If the name field contains non-printable characters (below 32 or - // above 126), those characters should be represented as escaped base 10 - // integers (\DDD). Back slashes and quotes should be escaped. Tabs, - // carriage returns, and line feeds should be converted to \t, \r, and \n - // respectively. 
- DNSQuestionNameKey = attribute.Key("dns.question.name") -) - -// DNSQuestionName returns an attribute KeyValue conforming to the -// "dns.question.name" semantic conventions. It represents the name being -// queried. -func DNSQuestionName(val string) attribute.KeyValue { - return DNSQuestionNameKey.String(val) -} - -// Attributes for operations with an authenticated and/or authorized enduser. -const ( - // EnduserIDKey is the attribute Key conforming to the "enduser.id" - // semantic conventions. It represents the username or client_id extracted - // from the access token or - // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header - // in the inbound request from outside the system. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'username' - EnduserIDKey = attribute.Key("enduser.id") - - // EnduserRoleKey is the attribute Key conforming to the "enduser.role" - // semantic conventions. It represents the actual/assumed role the client - // is making the request under extracted from token or application security - // context. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'admin' - EnduserRoleKey = attribute.Key("enduser.role") - - // EnduserScopeKey is the attribute Key conforming to the "enduser.scope" - // semantic conventions. It represents the scopes or granted authorities - // the client currently possesses extracted from token or application - // security context. The value would come from the scope associated with an - // [OAuth 2.0 Access - // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute - // value in a [SAML 2.0 - // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'read:message, write:files' - EnduserScopeKey = attribute.Key("enduser.scope") -) - -// EnduserID returns an attribute KeyValue conforming to the "enduser.id" -// semantic conventions. It represents the username or client_id extracted from -// the access token or -// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in -// the inbound request from outside the system. -func EnduserID(val string) attribute.KeyValue { - return EnduserIDKey.String(val) -} - -// EnduserRole returns an attribute KeyValue conforming to the -// "enduser.role" semantic conventions. It represents the actual/assumed role -// the client is making the request under extracted from token or application -// security context. -func EnduserRole(val string) attribute.KeyValue { - return EnduserRoleKey.String(val) -} - -// EnduserScope returns an attribute KeyValue conforming to the -// "enduser.scope" semantic conventions. It represents the scopes or granted -// authorities the client currently possesses extracted from token or -// application security context. The value would come from the scope associated -// with an [OAuth 2.0 Access -// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute -// value in a [SAML 2.0 -// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). -func EnduserScope(val string) attribute.KeyValue { - return EnduserScopeKey.String(val) -} - -// The shared attributes used to report an error. -const ( - // ErrorTypeKey is the attribute Key conforming to the "error.type" - // semantic conventions. It represents the describes a class of error the - // operation ended with. 
- // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'timeout', 'java.net.UnknownHostException', - // 'server_certificate_invalid', '500' - // Note: The `error.type` SHOULD be predictable, and SHOULD have low - // cardinality. - // - // When `error.type` is set to a type (e.g., an exception type), its - // canonical class name identifying the type within the artifact SHOULD be - // used. - // - // Instrumentations SHOULD document the list of errors they report. - // - // The cardinality of `error.type` within one instrumentation library - // SHOULD be low. - // Telemetry consumers that aggregate data from multiple instrumentation - // libraries and applications - // should be prepared for `error.type` to have high cardinality at query - // time when no - // additional filters are applied. - // - // If the operation has completed successfully, instrumentations SHOULD NOT - // set `error.type`. - // - // If a specific domain defines its own set of error identifiers (such as - // HTTP or gRPC status codes), - // it's RECOMMENDED to: - // - // * Use a domain-specific attribute - // * Set `error.type` to capture all errors, regardless of whether they are - // defined within the domain-specific set or not. - ErrorTypeKey = attribute.Key("error.type") -) - -var ( - // A fallback error value to be used when the instrumentation doesn't define a custom value - ErrorTypeOther = ErrorTypeKey.String("_OTHER") -) - -// Attributes for Events represented using Log Records. -const ( - // EventNameKey is the attribute Key conforming to the "event.name" - // semantic conventions. It represents the identifies the class / type of - // event. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'browser.mouse.click', 'device.app.lifecycle' - // Note: Event names are subject to the same rules as [attribute - // names](https://github.com/open-telemetry/opentelemetry-specification/tree/v1.33.0/specification/common/attribute-naming.md). - // Notably, event names are namespaced to avoid collisions and provide a - // clean separation of semantics for events in separate domains like - // browser, mobile, and kubernetes. - EventNameKey = attribute.Key("event.name") -) - -// EventName returns an attribute KeyValue conforming to the "event.name" -// semantic conventions. It represents the identifies the class / type of -// event. -func EventName(val string) attribute.KeyValue { - return EventNameKey.String(val) -} - -// The shared attributes used to report a single exception associated with a -// span or log. -const ( - // ExceptionEscapedKey is the attribute Key conforming to the - // "exception.escaped" semantic conventions. It represents the sHOULD be - // set to true if the exception event is recorded at a point where it is - // known that the exception is escaping the scope of the span. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: stable - // Note: An exception is considered to have escaped (or left) the scope of - // a span, - // if that span is ended while the exception is still logically "in - // flight". - // This may be actually "in flight" in some languages (e.g. if the - // exception - // is passed to a Context manager's `__exit__` method in Python) but will - // usually be caught at the point of recording the exception in most - // languages. - // - // It is usually not possible to determine at the point where an exception - // is thrown - // whether it will escape the scope of a span. 
- // However, it is trivial to know that an exception - // will escape, if one checks for an active exception just before ending - // the span, - // as done in the [example for recording span - // exceptions](https://opentelemetry.io/docs/specs/semconv/exceptions/exceptions-spans/#recording-an-exception). - // - // It follows that an exception may still escape the scope of the span - // even if the `exception.escaped` attribute was not set or set to false, - // since the event might have been recorded at a time where it was not - // clear whether the exception will escape. - ExceptionEscapedKey = attribute.Key("exception.escaped") - - // ExceptionMessageKey is the attribute Key conforming to the - // "exception.message" semantic conventions. It represents the exception - // message. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'Division by zero', "Can't convert 'int' object to str - // implicitly" - ExceptionMessageKey = attribute.Key("exception.message") - - // ExceptionStacktraceKey is the attribute Key conforming to the - // "exception.stacktrace" semantic conventions. It represents a stacktrace - // as a string in the natural representation for the language runtime. The - // representation is to be determined and documented by each language SIG. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test - // exception\\n at ' - // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' - // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' - // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' - ExceptionStacktraceKey = attribute.Key("exception.stacktrace") - - // ExceptionTypeKey is the attribute Key conforming to the "exception.type" - // semantic conventions. It represents the type of the exception (its - // fully-qualified class name, if applicable). 
The dynamic type of the - // exception should be preferred over the static type in languages that - // support it. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'java.net.ConnectException', 'OSError' - ExceptionTypeKey = attribute.Key("exception.type") -) - -// ExceptionEscaped returns an attribute KeyValue conforming to the -// "exception.escaped" semantic conventions. It represents the sHOULD be set to -// true if the exception event is recorded at a point where it is known that -// the exception is escaping the scope of the span. -func ExceptionEscaped(val bool) attribute.KeyValue { - return ExceptionEscapedKey.Bool(val) -} - -// ExceptionMessage returns an attribute KeyValue conforming to the -// "exception.message" semantic conventions. It represents the exception -// message. -func ExceptionMessage(val string) attribute.KeyValue { - return ExceptionMessageKey.String(val) -} - -// ExceptionStacktrace returns an attribute KeyValue conforming to the -// "exception.stacktrace" semantic conventions. It represents a stacktrace as a -// string in the natural representation for the language runtime. The -// representation is to be determined and documented by each language SIG. -func ExceptionStacktrace(val string) attribute.KeyValue { - return ExceptionStacktraceKey.String(val) -} - -// ExceptionType returns an attribute KeyValue conforming to the -// "exception.type" semantic conventions. It represents the type of the -// exception (its fully-qualified class name, if applicable). The dynamic type -// of the exception should be preferred over the static type in languages that -// support it. -func ExceptionType(val string) attribute.KeyValue { - return ExceptionTypeKey.String(val) -} - -// FaaS attributes -const ( - // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart" - // semantic conventions. 
It represents a boolean that is true if the - // serverless function is executed for the first time (aka cold-start). - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - FaaSColdstartKey = attribute.Key("faas.coldstart") - - // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic - // conventions. It represents a string containing the schedule period as - // [Cron - // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '0/5 * * * ? *' - FaaSCronKey = attribute.Key("faas.cron") - - // FaaSDocumentCollectionKey is the attribute Key conforming to the - // "faas.document.collection" semantic conventions. It represents the name - // of the source on which the triggering operation was performed. For - // example, in Cloud Storage or S3 corresponds to the bucket name, and in - // Cosmos DB to the database name. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myBucketName', 'myDBName' - FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") - - // FaaSDocumentNameKey is the attribute Key conforming to the - // "faas.document.name" semantic conventions. It represents the document - // name/table subjected to the operation. For example, in Cloud Storage or - // S3 is the name of the file, and in Cosmos DB the table name. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myFile.txt', 'myTableName' - FaaSDocumentNameKey = attribute.Key("faas.document.name") - - // FaaSDocumentOperationKey is the attribute Key conforming to the - // "faas.document.operation" semantic conventions. It represents the - // describes the type of the operation that was performed on the data. 
- // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - FaaSDocumentOperationKey = attribute.Key("faas.document.operation") - - // FaaSDocumentTimeKey is the attribute Key conforming to the - // "faas.document.time" semantic conventions. It represents a string - // containing the time when the data was accessed in the [ISO - // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format - // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2020-01-23T13:47:06Z' - FaaSDocumentTimeKey = attribute.Key("faas.document.time") - - // FaaSInstanceKey is the attribute Key conforming to the "faas.instance" - // semantic conventions. It represents the execution environment ID as a - // string, that will be potentially reused for other invocations to the - // same function/function version. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' - // Note: * **AWS Lambda:** Use the (full) log stream name. - FaaSInstanceKey = attribute.Key("faas.instance") - - // FaaSInvocationIDKey is the attribute Key conforming to the - // "faas.invocation_id" semantic conventions. It represents the invocation - // ID of the current function invocation. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' - FaaSInvocationIDKey = attribute.Key("faas.invocation_id") - - // FaaSInvokedNameKey is the attribute Key conforming to the - // "faas.invoked_name" semantic conventions. It represents the name of the - // invoked function. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'my-function' - // Note: SHOULD be equal to the `faas.name` resource attribute of the - // invoked function. 
- FaaSInvokedNameKey = attribute.Key("faas.invoked_name") - - // FaaSInvokedProviderKey is the attribute Key conforming to the - // "faas.invoked_provider" semantic conventions. It represents the cloud - // provider of the invoked function. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Note: SHOULD be equal to the `cloud.provider` resource attribute of the - // invoked function. - FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") - - // FaaSInvokedRegionKey is the attribute Key conforming to the - // "faas.invoked_region" semantic conventions. It represents the cloud - // region of the invoked function. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'eu-central-1' - // Note: SHOULD be equal to the `cloud.region` resource attribute of the - // invoked function. - FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") - - // FaaSMaxMemoryKey is the attribute Key conforming to the - // "faas.max_memory" semantic conventions. It represents the amount of - // memory available to the serverless function converted to Bytes. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 134217728 - // Note: It's recommended to set this attribute since e.g. too little - // memory can easily stop a Java AWS Lambda function from working - // correctly. On AWS Lambda, the environment variable - // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must - // be multiplied by 1,048,576). - FaaSMaxMemoryKey = attribute.Key("faas.max_memory") - - // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic - // conventions. It represents the name of the single function that this - // runtime instance executes. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'my-function', 'myazurefunctionapp/some-function-name' - // Note: This is the name of the function as configured/deployed on the - // FaaS - // platform and is usually different from the name of the callback - // function (which may be stored in the - // [`code.namespace`/`code.function`](/docs/general/attributes.md#source-code-attributes) - // span attributes). - // - // For some cloud providers, the above definition is ambiguous. The - // following - // definition of function name MUST be used for this attribute - // (and consequently the span name) for the listed cloud - // providers/products: - // - // * **Azure:** The full name `/`, i.e., function app name - // followed by a forward slash followed by the function name (this form - // can also be seen in the resource JSON for the function). - // This means that a span attribute MUST be used, as an Azure function - // app can host multiple functions that would usually share - // a TracerProvider (see also the `cloud.resource_id` attribute). - FaaSNameKey = attribute.Key("faas.name") - - // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic - // conventions. It represents a string containing the function invocation - // time in the [ISO - // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format - // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2020-01-23T13:47:06Z' - FaaSTimeKey = attribute.Key("faas.time") - - // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger" - // semantic conventions. It represents the type of the trigger which caused - // this function invocation. 
- // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - FaaSTriggerKey = attribute.Key("faas.trigger") - - // FaaSVersionKey is the attribute Key conforming to the "faas.version" - // semantic conventions. It represents the immutable version of the - // function being executed. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '26', 'pinkfroid-00002' - // Note: Depending on the cloud provider and platform, use: - // - // * **AWS Lambda:** The [function - // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html) - // (an integer represented as a decimal string). - // * **Google Cloud Run (Services):** The - // [revision](https://cloud.google.com/run/docs/managing/revisions) - // (i.e., the function name plus the revision suffix). - // * **Google Cloud Functions:** The value of the - // [`K_REVISION` environment - // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically). - // * **Azure Functions:** Not applicable. Do not set this attribute. 
- FaaSVersionKey = attribute.Key("faas.version") -) - -var ( - // When a new object is created - FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") - // When an object is modified - FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") - // When an object is deleted - FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") -) - -var ( - // Alibaba Cloud - FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") - // Amazon Web Services - FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") - // Microsoft Azure - FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") - // Google Cloud Platform - FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") - // Tencent Cloud - FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") -) - -var ( - // A response to some data source operation such as a database or filesystem read/write - FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") - // To provide an answer to an inbound HTTP request - FaaSTriggerHTTP = FaaSTriggerKey.String("http") - // A function is set to be executed when messages are sent to a messaging system - FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") - // A function is scheduled to be executed regularly - FaaSTriggerTimer = FaaSTriggerKey.String("timer") - // If none of the others apply - FaaSTriggerOther = FaaSTriggerKey.String("other") -) - -// FaaSColdstart returns an attribute KeyValue conforming to the -// "faas.coldstart" semantic conventions. It represents a boolean that is true -// if the serverless function is executed for the first time (aka cold-start). -func FaaSColdstart(val bool) attribute.KeyValue { - return FaaSColdstartKey.Bool(val) -} - -// FaaSCron returns an attribute KeyValue conforming to the "faas.cron" -// semantic conventions. 
It represents a string containing the schedule period -// as [Cron -// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). -func FaaSCron(val string) attribute.KeyValue { - return FaaSCronKey.String(val) -} - -// FaaSDocumentCollection returns an attribute KeyValue conforming to the -// "faas.document.collection" semantic conventions. It represents the name of -// the source on which the triggering operation was performed. For example, in -// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the -// database name. -func FaaSDocumentCollection(val string) attribute.KeyValue { - return FaaSDocumentCollectionKey.String(val) -} - -// FaaSDocumentName returns an attribute KeyValue conforming to the -// "faas.document.name" semantic conventions. It represents the document -// name/table subjected to the operation. For example, in Cloud Storage or S3 -// is the name of the file, and in Cosmos DB the table name. -func FaaSDocumentName(val string) attribute.KeyValue { - return FaaSDocumentNameKey.String(val) -} - -// FaaSDocumentTime returns an attribute KeyValue conforming to the -// "faas.document.time" semantic conventions. It represents a string containing -// the time when the data was accessed in the [ISO -// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format -// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). -func FaaSDocumentTime(val string) attribute.KeyValue { - return FaaSDocumentTimeKey.String(val) -} - -// FaaSInstance returns an attribute KeyValue conforming to the -// "faas.instance" semantic conventions. It represents the execution -// environment ID as a string, that will be potentially reused for other -// invocations to the same function/function version. -func FaaSInstance(val string) attribute.KeyValue { - return FaaSInstanceKey.String(val) -} - -// FaaSInvocationID returns an attribute KeyValue conforming to the -// "faas.invocation_id" semantic conventions. 
It represents the invocation ID -// of the current function invocation. -func FaaSInvocationID(val string) attribute.KeyValue { - return FaaSInvocationIDKey.String(val) -} - -// FaaSInvokedName returns an attribute KeyValue conforming to the -// "faas.invoked_name" semantic conventions. It represents the name of the -// invoked function. -func FaaSInvokedName(val string) attribute.KeyValue { - return FaaSInvokedNameKey.String(val) -} - -// FaaSInvokedRegion returns an attribute KeyValue conforming to the -// "faas.invoked_region" semantic conventions. It represents the cloud region -// of the invoked function. -func FaaSInvokedRegion(val string) attribute.KeyValue { - return FaaSInvokedRegionKey.String(val) -} - -// FaaSMaxMemory returns an attribute KeyValue conforming to the -// "faas.max_memory" semantic conventions. It represents the amount of memory -// available to the serverless function converted to Bytes. -func FaaSMaxMemory(val int) attribute.KeyValue { - return FaaSMaxMemoryKey.Int(val) -} - -// FaaSName returns an attribute KeyValue conforming to the "faas.name" -// semantic conventions. It represents the name of the single function that -// this runtime instance executes. -func FaaSName(val string) attribute.KeyValue { - return FaaSNameKey.String(val) -} - -// FaaSTime returns an attribute KeyValue conforming to the "faas.time" -// semantic conventions. It represents a string containing the function -// invocation time in the [ISO -// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format -// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). -func FaaSTime(val string) attribute.KeyValue { - return FaaSTimeKey.String(val) -} - -// FaaSVersion returns an attribute KeyValue conforming to the -// "faas.version" semantic conventions. It represents the immutable version of -// the function being executed. -func FaaSVersion(val string) attribute.KeyValue { - return FaaSVersionKey.String(val) -} - -// Attributes for Feature Flags. 
-const ( - // FeatureFlagKeyKey is the attribute Key conforming to the - // "feature_flag.key" semantic conventions. It represents the unique - // identifier of the feature flag. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'logo-color' - FeatureFlagKeyKey = attribute.Key("feature_flag.key") - - // FeatureFlagProviderNameKey is the attribute Key conforming to the - // "feature_flag.provider_name" semantic conventions. It represents the - // name of the service provider that performs the flag evaluation. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Flag Manager' - FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name") - - // FeatureFlagVariantKey is the attribute Key conforming to the - // "feature_flag.variant" semantic conventions. It represents the sHOULD be - // a semantic identifier for a value. If one is unavailable, a stringified - // version of the value can be used. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'red', 'true', 'on' - // Note: A semantic identifier, commonly referred to as a variant, provides - // a means - // for referring to a value without including the value itself. This can - // provide additional context for understanding the meaning behind a value. - // For example, the variant `red` maybe be used for the value `#c05543`. - // - // A stringified version of the value can be used in situations where a - // semantic identifier is unavailable. String representation of the value - // should be determined by the implementer. - FeatureFlagVariantKey = attribute.Key("feature_flag.variant") -) - -// FeatureFlagKey returns an attribute KeyValue conforming to the -// "feature_flag.key" semantic conventions. It represents the unique identifier -// of the feature flag. 
-func FeatureFlagKey(val string) attribute.KeyValue { - return FeatureFlagKeyKey.String(val) -} - -// FeatureFlagProviderName returns an attribute KeyValue conforming to the -// "feature_flag.provider_name" semantic conventions. It represents the name of -// the service provider that performs the flag evaluation. -func FeatureFlagProviderName(val string) attribute.KeyValue { - return FeatureFlagProviderNameKey.String(val) -} - -// FeatureFlagVariant returns an attribute KeyValue conforming to the -// "feature_flag.variant" semantic conventions. It represents the sHOULD be a -// semantic identifier for a value. If one is unavailable, a stringified -// version of the value can be used. -func FeatureFlagVariant(val string) attribute.KeyValue { - return FeatureFlagVariantKey.String(val) -} - -// Describes file attributes. -const ( - // FileDirectoryKey is the attribute Key conforming to the "file.directory" - // semantic conventions. It represents the directory where the file is - // located. It should include the drive letter, when appropriate. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/home/user', 'C:\\Program Files\\MyApp' - FileDirectoryKey = attribute.Key("file.directory") - - // FileExtensionKey is the attribute Key conforming to the "file.extension" - // semantic conventions. It represents the file extension, excluding the - // leading dot. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'png', 'gz' - // Note: When the file name has multiple extensions (example.tar.gz), only - // the last one should be captured ("gz", not "tar.gz"). - FileExtensionKey = attribute.Key("file.extension") - - // FileNameKey is the attribute Key conforming to the "file.name" semantic - // conventions. It represents the name of the file including the extension, - // without the directory. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'example.png' - FileNameKey = attribute.Key("file.name") - - // FilePathKey is the attribute Key conforming to the "file.path" semantic - // conventions. It represents the full path to the file, including the file - // name. It should include the drive letter, when appropriate. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/home/alice/example.png', 'C:\\Program - // Files\\MyApp\\myapp.exe' - FilePathKey = attribute.Key("file.path") - - // FileSizeKey is the attribute Key conforming to the "file.size" semantic - // conventions. It represents the file size in bytes. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - FileSizeKey = attribute.Key("file.size") -) - -// FileDirectory returns an attribute KeyValue conforming to the -// "file.directory" semantic conventions. It represents the directory where the -// file is located. It should include the drive letter, when appropriate. -func FileDirectory(val string) attribute.KeyValue { - return FileDirectoryKey.String(val) -} - -// FileExtension returns an attribute KeyValue conforming to the -// "file.extension" semantic conventions. It represents the file extension, -// excluding the leading dot. -func FileExtension(val string) attribute.KeyValue { - return FileExtensionKey.String(val) -} - -// FileName returns an attribute KeyValue conforming to the "file.name" -// semantic conventions. It represents the name of the file including the -// extension, without the directory. -func FileName(val string) attribute.KeyValue { - return FileNameKey.String(val) -} - -// FilePath returns an attribute KeyValue conforming to the "file.path" -// semantic conventions. It represents the full path to the file, including the -// file name. It should include the drive letter, when appropriate. 
-func FilePath(val string) attribute.KeyValue { - return FilePathKey.String(val) -} - -// FileSize returns an attribute KeyValue conforming to the "file.size" -// semantic conventions. It represents the file size in bytes. -func FileSize(val int) attribute.KeyValue { - return FileSizeKey.Int(val) -} - -// Attributes for Google Cloud Run. -const ( - // GCPCloudRunJobExecutionKey is the attribute Key conforming to the - // "gcp.cloud_run.job.execution" semantic conventions. It represents the - // name of the Cloud Run - // [execution](https://cloud.google.com/run/docs/managing/job-executions) - // being run for the Job, as set by the - // [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) - // environment variable. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'job-name-xxxx', 'sample-job-mdw84' - GCPCloudRunJobExecutionKey = attribute.Key("gcp.cloud_run.job.execution") - - // GCPCloudRunJobTaskIndexKey is the attribute Key conforming to the - // "gcp.cloud_run.job.task_index" semantic conventions. It represents the - // index for a task within an execution as provided by the - // [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) - // environment variable. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 0, 1 - GCPCloudRunJobTaskIndexKey = attribute.Key("gcp.cloud_run.job.task_index") -) - -// GCPCloudRunJobExecution returns an attribute KeyValue conforming to the -// "gcp.cloud_run.job.execution" semantic conventions. It represents the name -// of the Cloud Run -// [execution](https://cloud.google.com/run/docs/managing/job-executions) being -// run for the Job, as set by the -// [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) -// environment variable. 
-func GCPCloudRunJobExecution(val string) attribute.KeyValue { - return GCPCloudRunJobExecutionKey.String(val) -} - -// GCPCloudRunJobTaskIndex returns an attribute KeyValue conforming to the -// "gcp.cloud_run.job.task_index" semantic conventions. It represents the index -// for a task within an execution as provided by the -// [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) -// environment variable. -func GCPCloudRunJobTaskIndex(val int) attribute.KeyValue { - return GCPCloudRunJobTaskIndexKey.Int(val) -} - -// Attributes for Google Compute Engine (GCE). -const ( - // GCPGceInstanceHostnameKey is the attribute Key conforming to the - // "gcp.gce.instance.hostname" semantic conventions. It represents the - // hostname of a GCE instance. This is the full value of the default or - // [custom - // hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'my-host1234.example.com', - // 'sample-vm.us-west1-b.c.my-project.internal' - GCPGceInstanceHostnameKey = attribute.Key("gcp.gce.instance.hostname") - - // GCPGceInstanceNameKey is the attribute Key conforming to the - // "gcp.gce.instance.name" semantic conventions. It represents the instance - // name of a GCE instance. This is the value provided by `host.name`, the - // visible name of the instance in the Cloud Console UI, and the prefix for - // the default hostname of the instance as defined by the [default internal - // DNS - // name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'instance-1', 'my-vm-name' - GCPGceInstanceNameKey = attribute.Key("gcp.gce.instance.name") -) - -// GCPGceInstanceHostname returns an attribute KeyValue conforming to the -// "gcp.gce.instance.hostname" semantic conventions. 
It represents the hostname -// of a GCE instance. This is the full value of the default or [custom -// hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm). -func GCPGceInstanceHostname(val string) attribute.KeyValue { - return GCPGceInstanceHostnameKey.String(val) -} - -// GCPGceInstanceName returns an attribute KeyValue conforming to the -// "gcp.gce.instance.name" semantic conventions. It represents the instance -// name of a GCE instance. This is the value provided by `host.name`, the -// visible name of the instance in the Cloud Console UI, and the prefix for the -// default hostname of the instance as defined by the [default internal DNS -// name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). -func GCPGceInstanceName(val string) attribute.KeyValue { - return GCPGceInstanceNameKey.String(val) -} - -// The attributes used to describe telemetry in the context of LLM (Large -// Language Models) requests and responses. -const ( - // GenAiCompletionKey is the attribute Key conforming to the - // "gen_ai.completion" semantic conventions. It represents the full - // response received from the LLM. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: "[{'role': 'assistant', 'content': 'The capital of France is - // Paris.'}]" - // Note: It's RECOMMENDED to format completions as JSON string matching - // [OpenAI messages - // format](https://platform.openai.com/docs/guides/text-generation) - GenAiCompletionKey = attribute.Key("gen_ai.completion") - - // GenAiPromptKey is the attribute Key conforming to the "gen_ai.prompt" - // semantic conventions. It represents the full prompt sent to an LLM. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: "[{'role': 'user', 'content': 'What is the capital of - // France?'}]" - // Note: It's RECOMMENDED to format prompts as JSON string matching [OpenAI - // messages - // format](https://platform.openai.com/docs/guides/text-generation) - GenAiPromptKey = attribute.Key("gen_ai.prompt") - - // GenAiRequestMaxTokensKey is the attribute Key conforming to the - // "gen_ai.request.max_tokens" semantic conventions. It represents the - // maximum number of tokens the LLM generates for a request. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 100 - GenAiRequestMaxTokensKey = attribute.Key("gen_ai.request.max_tokens") - - // GenAiRequestModelKey is the attribute Key conforming to the - // "gen_ai.request.model" semantic conventions. It represents the name of - // the LLM a request is being made to. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'gpt-4' - GenAiRequestModelKey = attribute.Key("gen_ai.request.model") - - // GenAiRequestTemperatureKey is the attribute Key conforming to the - // "gen_ai.request.temperature" semantic conventions. It represents the - // temperature setting for the LLM request. - // - // Type: double - // RequirementLevel: Optional - // Stability: experimental - // Examples: 0.0 - GenAiRequestTemperatureKey = attribute.Key("gen_ai.request.temperature") - - // GenAiRequestTopPKey is the attribute Key conforming to the - // "gen_ai.request.top_p" semantic conventions. It represents the top_p - // sampling setting for the LLM request. - // - // Type: double - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1.0 - GenAiRequestTopPKey = attribute.Key("gen_ai.request.top_p") - - // GenAiResponseFinishReasonsKey is the attribute Key conforming to the - // "gen_ai.response.finish_reasons" semantic conventions. 
It represents the - // array of reasons the model stopped generating tokens, corresponding to - // each generation received. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'stop' - GenAiResponseFinishReasonsKey = attribute.Key("gen_ai.response.finish_reasons") - - // GenAiResponseIDKey is the attribute Key conforming to the - // "gen_ai.response.id" semantic conventions. It represents the unique - // identifier for the completion. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'chatcmpl-123' - GenAiResponseIDKey = attribute.Key("gen_ai.response.id") - - // GenAiResponseModelKey is the attribute Key conforming to the - // "gen_ai.response.model" semantic conventions. It represents the name of - // the LLM a response was generated from. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'gpt-4-0613' - GenAiResponseModelKey = attribute.Key("gen_ai.response.model") - - // GenAiSystemKey is the attribute Key conforming to the "gen_ai.system" - // semantic conventions. It represents the Generative AI product as - // identified by the client instrumentation. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'openai' - // Note: The actual GenAI product may differ from the one identified by the - // client. For example, when using OpenAI client libraries to communicate - // with Mistral, the `gen_ai.system` is set to `openai` based on the - // instrumentation's best knowledge. - GenAiSystemKey = attribute.Key("gen_ai.system") - - // GenAiUsageCompletionTokensKey is the attribute Key conforming to the - // "gen_ai.usage.completion_tokens" semantic conventions. It represents the - // number of tokens used in the LLM response (completion). 
- // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 180 - GenAiUsageCompletionTokensKey = attribute.Key("gen_ai.usage.completion_tokens") - - // GenAiUsagePromptTokensKey is the attribute Key conforming to the - // "gen_ai.usage.prompt_tokens" semantic conventions. It represents the - // number of tokens used in the LLM prompt. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 100 - GenAiUsagePromptTokensKey = attribute.Key("gen_ai.usage.prompt_tokens") -) - -var ( - // OpenAI - GenAiSystemOpenai = GenAiSystemKey.String("openai") -) - -// GenAiCompletion returns an attribute KeyValue conforming to the -// "gen_ai.completion" semantic conventions. It represents the full response -// received from the LLM. -func GenAiCompletion(val string) attribute.KeyValue { - return GenAiCompletionKey.String(val) -} - -// GenAiPrompt returns an attribute KeyValue conforming to the -// "gen_ai.prompt" semantic conventions. It represents the full prompt sent to -// an LLM. -func GenAiPrompt(val string) attribute.KeyValue { - return GenAiPromptKey.String(val) -} - -// GenAiRequestMaxTokens returns an attribute KeyValue conforming to the -// "gen_ai.request.max_tokens" semantic conventions. It represents the maximum -// number of tokens the LLM generates for a request. -func GenAiRequestMaxTokens(val int) attribute.KeyValue { - return GenAiRequestMaxTokensKey.Int(val) -} - -// GenAiRequestModel returns an attribute KeyValue conforming to the -// "gen_ai.request.model" semantic conventions. It represents the name of the -// LLM a request is being made to. -func GenAiRequestModel(val string) attribute.KeyValue { - return GenAiRequestModelKey.String(val) -} - -// GenAiRequestTemperature returns an attribute KeyValue conforming to the -// "gen_ai.request.temperature" semantic conventions. It represents the -// temperature setting for the LLM request. 
-func GenAiRequestTemperature(val float64) attribute.KeyValue { - return GenAiRequestTemperatureKey.Float64(val) -} - -// GenAiRequestTopP returns an attribute KeyValue conforming to the -// "gen_ai.request.top_p" semantic conventions. It represents the top_p -// sampling setting for the LLM request. -func GenAiRequestTopP(val float64) attribute.KeyValue { - return GenAiRequestTopPKey.Float64(val) -} - -// GenAiResponseFinishReasons returns an attribute KeyValue conforming to -// the "gen_ai.response.finish_reasons" semantic conventions. It represents the -// array of reasons the model stopped generating tokens, corresponding to each -// generation received. -func GenAiResponseFinishReasons(val ...string) attribute.KeyValue { - return GenAiResponseFinishReasonsKey.StringSlice(val) -} - -// GenAiResponseID returns an attribute KeyValue conforming to the -// "gen_ai.response.id" semantic conventions. It represents the unique -// identifier for the completion. -func GenAiResponseID(val string) attribute.KeyValue { - return GenAiResponseIDKey.String(val) -} - -// GenAiResponseModel returns an attribute KeyValue conforming to the -// "gen_ai.response.model" semantic conventions. It represents the name of the -// LLM a response was generated from. -func GenAiResponseModel(val string) attribute.KeyValue { - return GenAiResponseModelKey.String(val) -} - -// GenAiUsageCompletionTokens returns an attribute KeyValue conforming to -// the "gen_ai.usage.completion_tokens" semantic conventions. It represents the -// number of tokens used in the LLM response (completion). -func GenAiUsageCompletionTokens(val int) attribute.KeyValue { - return GenAiUsageCompletionTokensKey.Int(val) -} - -// GenAiUsagePromptTokens returns an attribute KeyValue conforming to the -// "gen_ai.usage.prompt_tokens" semantic conventions. It represents the number -// of tokens used in the LLM prompt. 
-func GenAiUsagePromptTokens(val int) attribute.KeyValue { - return GenAiUsagePromptTokensKey.Int(val) -} - -// Attributes for GraphQL. -const ( - // GraphqlDocumentKey is the attribute Key conforming to the - // "graphql.document" semantic conventions. It represents the GraphQL - // document being executed. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'query findBookByID { bookByID(id: ?) { name } }' - // Note: The value may be sanitized to exclude sensitive information. - GraphqlDocumentKey = attribute.Key("graphql.document") - - // GraphqlOperationNameKey is the attribute Key conforming to the - // "graphql.operation.name" semantic conventions. It represents the name of - // the operation being executed. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'findBookByID' - GraphqlOperationNameKey = attribute.Key("graphql.operation.name") - - // GraphqlOperationTypeKey is the attribute Key conforming to the - // "graphql.operation.type" semantic conventions. It represents the type of - // the operation being executed. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'query', 'mutation', 'subscription' - GraphqlOperationTypeKey = attribute.Key("graphql.operation.type") -) - -var ( - // GraphQL query - GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query") - // GraphQL mutation - GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation") - // GraphQL subscription - GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription") -) - -// GraphqlDocument returns an attribute KeyValue conforming to the -// "graphql.document" semantic conventions. It represents the GraphQL document -// being executed. 
-func GraphqlDocument(val string) attribute.KeyValue { - return GraphqlDocumentKey.String(val) -} - -// GraphqlOperationName returns an attribute KeyValue conforming to the -// "graphql.operation.name" semantic conventions. It represents the name of the -// operation being executed. -func GraphqlOperationName(val string) attribute.KeyValue { - return GraphqlOperationNameKey.String(val) -} - -// Attributes for the Android platform on which the Android application is -// running. -const ( - // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id" - // semantic conventions. It represents the unique identifier for the - // application - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2daa2797-e42b-4624-9322-ec3f968df4da' - HerokuAppIDKey = attribute.Key("heroku.app.id") - - // HerokuReleaseCommitKey is the attribute Key conforming to the - // "heroku.release.commit" semantic conventions. It represents the commit - // hash for the current release - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'e6134959463efd8966b20e75b913cafe3f5ec' - HerokuReleaseCommitKey = attribute.Key("heroku.release.commit") - - // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the - // "heroku.release.creation_timestamp" semantic conventions. It represents - // the time and date the release was created - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2022-10-23T18:00:42Z' - HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp") -) - -// HerokuAppID returns an attribute KeyValue conforming to the -// "heroku.app.id" semantic conventions. 
It represents the unique identifier -// for the application -func HerokuAppID(val string) attribute.KeyValue { - return HerokuAppIDKey.String(val) -} - -// HerokuReleaseCommit returns an attribute KeyValue conforming to the -// "heroku.release.commit" semantic conventions. It represents the commit hash -// for the current release -func HerokuReleaseCommit(val string) attribute.KeyValue { - return HerokuReleaseCommitKey.String(val) -} - -// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming -// to the "heroku.release.creation_timestamp" semantic conventions. It -// represents the time and date the release was created -func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue { - return HerokuReleaseCreationTimestampKey.String(val) -} - -// A host is defined as a computing instance. For example, physical servers, -// virtual machines, switches or disk array. -const ( - // HostArchKey is the attribute Key conforming to the "host.arch" semantic - // conventions. It represents the CPU architecture the host system is - // running on. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - HostArchKey = attribute.Key("host.arch") - - // HostCPUCacheL2SizeKey is the attribute Key conforming to the - // "host.cpu.cache.l2.size" semantic conventions. It represents the amount - // of level 2 memory cache available to the processor (in Bytes). - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 12288000 - HostCPUCacheL2SizeKey = attribute.Key("host.cpu.cache.l2.size") - - // HostCPUFamilyKey is the attribute Key conforming to the - // "host.cpu.family" semantic conventions. It represents the family or - // generation of the CPU. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '6', 'PA-RISC 1.1e' - HostCPUFamilyKey = attribute.Key("host.cpu.family") - - // HostCPUModelIDKey is the attribute Key conforming to the - // "host.cpu.model.id" semantic conventions. It represents the model - // identifier. It provides more granular information about the CPU, - // distinguishing it from other CPUs within the same family. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '6', '9000/778/B180L' - HostCPUModelIDKey = attribute.Key("host.cpu.model.id") - - // HostCPUModelNameKey is the attribute Key conforming to the - // "host.cpu.model.name" semantic conventions. It represents the model - // designation of the processor. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz' - HostCPUModelNameKey = attribute.Key("host.cpu.model.name") - - // HostCPUSteppingKey is the attribute Key conforming to the - // "host.cpu.stepping" semantic conventions. It represents the stepping or - // core revisions. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1', 'r1p1' - HostCPUSteppingKey = attribute.Key("host.cpu.stepping") - - // HostCPUVendorIDKey is the attribute Key conforming to the - // "host.cpu.vendor.id" semantic conventions. It represents the processor - // manufacturer identifier. A maximum 12-character string. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'GenuineIntel' - // Note: [CPUID](https://wiki.osdev.org/CPUID) command returns the vendor - // ID string in EBX, EDX and ECX registers. Writing these to memory in this - // order results in a 12-character string. 
- HostCPUVendorIDKey = attribute.Key("host.cpu.vendor.id") - - // HostIDKey is the attribute Key conforming to the "host.id" semantic - // conventions. It represents the unique host ID. For Cloud, this must be - // the instance_id assigned by the cloud provider. For non-containerized - // systems, this should be the `machine-id`. See the table below for the - // sources to use to determine the `machine-id` based on operating system. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'fdbf79e8af94cb7f9e8df36789187052' - HostIDKey = attribute.Key("host.id") - - // HostImageIDKey is the attribute Key conforming to the "host.image.id" - // semantic conventions. It represents the vM image ID or host OS image ID. - // For Cloud, this value is from the provider. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'ami-07b06b442921831e5' - HostImageIDKey = attribute.Key("host.image.id") - - // HostImageNameKey is the attribute Key conforming to the - // "host.image.name" semantic conventions. It represents the name of the VM - // image or OS install the host was instantiated from. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' - HostImageNameKey = attribute.Key("host.image.name") - - // HostImageVersionKey is the attribute Key conforming to the - // "host.image.version" semantic conventions. It represents the version - // string of the VM image or host OS as defined in [Version - // Attributes](/docs/resource/README.md#version-attributes). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '0.1' - HostImageVersionKey = attribute.Key("host.image.version") - - // HostIPKey is the attribute Key conforming to the "host.ip" semantic - // conventions. 
It represents the available IP addresses of the host, - // excluding loopback interfaces. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '192.168.1.140', 'fe80::abc2:4a28:737a:609e' - // Note: IPv4 Addresses MUST be specified in dotted-quad notation. IPv6 - // addresses MUST be specified in the [RFC - // 5952](https://www.rfc-editor.org/rfc/rfc5952.html) format. - HostIPKey = attribute.Key("host.ip") - - // HostMacKey is the attribute Key conforming to the "host.mac" semantic - // conventions. It represents the available MAC addresses of the host, - // excluding loopback interfaces. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'AC-DE-48-23-45-67', 'AC-DE-48-23-45-67-01-9F' - // Note: MAC Addresses MUST be represented in [IEEE RA hexadecimal - // form](https://standards.ieee.org/wp-content/uploads/import/documents/tutorials/eui.pdf): - // as hyphen-separated octets in uppercase hexadecimal form from most to - // least significant. - HostMacKey = attribute.Key("host.mac") - - // HostNameKey is the attribute Key conforming to the "host.name" semantic - // conventions. It represents the name of the host. On Unix systems, it may - // contain what the hostname command returns, or the fully qualified - // hostname, or another name specified by the user. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry-test' - HostNameKey = attribute.Key("host.name") - - // HostTypeKey is the attribute Key conforming to the "host.type" semantic - // conventions. It represents the type of host. For Cloud, this must be the - // machine type. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'n1-standard-1' - HostTypeKey = attribute.Key("host.type") -) - -var ( - // AMD64 - HostArchAMD64 = HostArchKey.String("amd64") - // ARM32 - HostArchARM32 = HostArchKey.String("arm32") - // ARM64 - HostArchARM64 = HostArchKey.String("arm64") - // Itanium - HostArchIA64 = HostArchKey.String("ia64") - // 32-bit PowerPC - HostArchPPC32 = HostArchKey.String("ppc32") - // 64-bit PowerPC - HostArchPPC64 = HostArchKey.String("ppc64") - // IBM z/Architecture - HostArchS390x = HostArchKey.String("s390x") - // 32-bit x86 - HostArchX86 = HostArchKey.String("x86") -) - -// HostCPUCacheL2Size returns an attribute KeyValue conforming to the -// "host.cpu.cache.l2.size" semantic conventions. It represents the amount of -// level 2 memory cache available to the processor (in Bytes). -func HostCPUCacheL2Size(val int) attribute.KeyValue { - return HostCPUCacheL2SizeKey.Int(val) -} - -// HostCPUFamily returns an attribute KeyValue conforming to the -// "host.cpu.family" semantic conventions. It represents the family or -// generation of the CPU. -func HostCPUFamily(val string) attribute.KeyValue { - return HostCPUFamilyKey.String(val) -} - -// HostCPUModelID returns an attribute KeyValue conforming to the -// "host.cpu.model.id" semantic conventions. It represents the model -// identifier. It provides more granular information about the CPU, -// distinguishing it from other CPUs within the same family. -func HostCPUModelID(val string) attribute.KeyValue { - return HostCPUModelIDKey.String(val) -} - -// HostCPUModelName returns an attribute KeyValue conforming to the -// "host.cpu.model.name" semantic conventions. It represents the model -// designation of the processor. -func HostCPUModelName(val string) attribute.KeyValue { - return HostCPUModelNameKey.String(val) -} - -// HostCPUStepping returns an attribute KeyValue conforming to the -// "host.cpu.stepping" semantic conventions. 
It represents the stepping or core -// revisions. -func HostCPUStepping(val string) attribute.KeyValue { - return HostCPUSteppingKey.String(val) -} - -// HostCPUVendorID returns an attribute KeyValue conforming to the -// "host.cpu.vendor.id" semantic conventions. It represents the processor -// manufacturer identifier. A maximum 12-character string. -func HostCPUVendorID(val string) attribute.KeyValue { - return HostCPUVendorIDKey.String(val) -} - -// HostID returns an attribute KeyValue conforming to the "host.id" semantic -// conventions. It represents the unique host ID. For Cloud, this must be the -// instance_id assigned by the cloud provider. For non-containerized systems, -// this should be the `machine-id`. See the table below for the sources to use -// to determine the `machine-id` based on operating system. -func HostID(val string) attribute.KeyValue { - return HostIDKey.String(val) -} - -// HostImageID returns an attribute KeyValue conforming to the -// "host.image.id" semantic conventions. It represents the vM image ID or host -// OS image ID. For Cloud, this value is from the provider. -func HostImageID(val string) attribute.KeyValue { - return HostImageIDKey.String(val) -} - -// HostImageName returns an attribute KeyValue conforming to the -// "host.image.name" semantic conventions. It represents the name of the VM -// image or OS install the host was instantiated from. -func HostImageName(val string) attribute.KeyValue { - return HostImageNameKey.String(val) -} - -// HostImageVersion returns an attribute KeyValue conforming to the -// "host.image.version" semantic conventions. It represents the version string -// of the VM image or host OS as defined in [Version -// Attributes](/docs/resource/README.md#version-attributes). -func HostImageVersion(val string) attribute.KeyValue { - return HostImageVersionKey.String(val) -} - -// HostIP returns an attribute KeyValue conforming to the "host.ip" semantic -// conventions. 
It represents the available IP addresses of the host, excluding -// loopback interfaces. -func HostIP(val ...string) attribute.KeyValue { - return HostIPKey.StringSlice(val) -} - -// HostMac returns an attribute KeyValue conforming to the "host.mac" -// semantic conventions. It represents the available MAC addresses of the host, -// excluding loopback interfaces. -func HostMac(val ...string) attribute.KeyValue { - return HostMacKey.StringSlice(val) -} - -// HostName returns an attribute KeyValue conforming to the "host.name" -// semantic conventions. It represents the name of the host. On Unix systems, -// it may contain what the hostname command returns, or the fully qualified -// hostname, or another name specified by the user. -func HostName(val string) attribute.KeyValue { - return HostNameKey.String(val) -} - -// HostType returns an attribute KeyValue conforming to the "host.type" -// semantic conventions. It represents the type of host. For Cloud, this must -// be the machine type. -func HostType(val string) attribute.KeyValue { - return HostTypeKey.String(val) -} - -// Semantic convention attributes in the HTTP namespace. -const ( - // HTTPConnectionStateKey is the attribute Key conforming to the - // "http.connection.state" semantic conventions. It represents the state of - // the HTTP connection in the HTTP connection pool. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'active', 'idle' - HTTPConnectionStateKey = attribute.Key("http.connection.state") - - // HTTPRequestBodySizeKey is the attribute Key conforming to the - // "http.request.body.size" semantic conventions. It represents the size of - // the request payload body in bytes. This is the number of bytes - // transferred excluding headers and is often, but not always, present as - // the - // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) - // header. 
For requests using transport encoding, this should be the - // compressed size. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 3495 - HTTPRequestBodySizeKey = attribute.Key("http.request.body.size") - - // HTTPRequestMethodKey is the attribute Key conforming to the - // "http.request.method" semantic conventions. It represents the hTTP - // request method. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'GET', 'POST', 'HEAD' - // Note: HTTP request method value SHOULD be "known" to the - // instrumentation. - // By default, this convention defines "known" methods as the ones listed - // in [RFC9110](https://www.rfc-editor.org/rfc/rfc9110.html#name-methods) - // and the PATCH method defined in - // [RFC5789](https://www.rfc-editor.org/rfc/rfc5789.html). - // - // If the HTTP request method is not known to instrumentation, it MUST set - // the `http.request.method` attribute to `_OTHER`. - // - // If the HTTP instrumentation could end up converting valid HTTP request - // methods to `_OTHER`, then it MUST provide a way to override - // the list of known HTTP methods. If this override is done via environment - // variable, then the environment variable MUST be named - // OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated - // list of case-sensitive known HTTP methods - // (this list MUST be a full override of the default known method, it is - // not a list of known methods in addition to the defaults). - // - // HTTP method names are case-sensitive and `http.request.method` attribute - // value MUST match a known HTTP method name exactly. - // Instrumentations for specific web frameworks that consider HTTP methods - // to be case insensitive, SHOULD populate a canonical equivalent. - // Tracing instrumentations that do so, MUST also set - // `http.request.method_original` to the original value. 
- HTTPRequestMethodKey = attribute.Key("http.request.method") - - // HTTPRequestMethodOriginalKey is the attribute Key conforming to the - // "http.request.method_original" semantic conventions. It represents the - // original HTTP method sent by the client in the request line. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'GeT', 'ACL', 'foo' - HTTPRequestMethodOriginalKey = attribute.Key("http.request.method_original") - - // HTTPRequestResendCountKey is the attribute Key conforming to the - // "http.request.resend_count" semantic conventions. It represents the - // ordinal number of request resending attempt (for any reason, including - // redirects). - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 3 - // Note: The resend count SHOULD be updated each time an HTTP request gets - // resent by the client, regardless of what was the cause of the resending - // (e.g. redirection, authorization failure, 503 Server Unavailable, - // network issues, or any other). - HTTPRequestResendCountKey = attribute.Key("http.request.resend_count") - - // HTTPRequestSizeKey is the attribute Key conforming to the - // "http.request.size" semantic conventions. It represents the total size - // of the request in bytes. This should be the total number of bytes sent - // over the wire, including the request line (HTTP/1.1), framing (HTTP/2 - // and HTTP/3), headers, and request body if any. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1437 - HTTPRequestSizeKey = attribute.Key("http.request.size") - - // HTTPResponseBodySizeKey is the attribute Key conforming to the - // "http.response.body.size" semantic conventions. It represents the size - // of the response payload body in bytes. 
This is the number of bytes - // transferred excluding headers and is often, but not always, present as - // the - // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) - // header. For requests using transport encoding, this should be the - // compressed size. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 3495 - HTTPResponseBodySizeKey = attribute.Key("http.response.body.size") - - // HTTPResponseSizeKey is the attribute Key conforming to the - // "http.response.size" semantic conventions. It represents the total size - // of the response in bytes. This should be the total number of bytes sent - // over the wire, including the status line (HTTP/1.1), framing (HTTP/2 and - // HTTP/3), headers, and response body and trailers if any. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1437 - HTTPResponseSizeKey = attribute.Key("http.response.size") - - // HTTPResponseStatusCodeKey is the attribute Key conforming to the - // "http.response.status_code" semantic conventions. It represents the - // [HTTP response status - // code](https://tools.ietf.org/html/rfc7231#section-6). - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 200 - HTTPResponseStatusCodeKey = attribute.Key("http.response.status_code") - - // HTTPRouteKey is the attribute Key conforming to the "http.route" - // semantic conventions. It represents the matched route, that is, the path - // template in the format used by the respective server framework. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '/users/:userID?', '{controller}/{action}/{id?}' - // Note: MUST NOT be populated when this is not supported by the HTTP - // server framework as the route attribute should have low-cardinality and - // the URI path can NOT substitute it. 
- // SHOULD include the [application - // root](/docs/http/http-spans.md#http-server-definitions) if there is one. - HTTPRouteKey = attribute.Key("http.route") -) - -var ( - // active state - HTTPConnectionStateActive = HTTPConnectionStateKey.String("active") - // idle state - HTTPConnectionStateIdle = HTTPConnectionStateKey.String("idle") -) - -var ( - // CONNECT method - HTTPRequestMethodConnect = HTTPRequestMethodKey.String("CONNECT") - // DELETE method - HTTPRequestMethodDelete = HTTPRequestMethodKey.String("DELETE") - // GET method - HTTPRequestMethodGet = HTTPRequestMethodKey.String("GET") - // HEAD method - HTTPRequestMethodHead = HTTPRequestMethodKey.String("HEAD") - // OPTIONS method - HTTPRequestMethodOptions = HTTPRequestMethodKey.String("OPTIONS") - // PATCH method - HTTPRequestMethodPatch = HTTPRequestMethodKey.String("PATCH") - // POST method - HTTPRequestMethodPost = HTTPRequestMethodKey.String("POST") - // PUT method - HTTPRequestMethodPut = HTTPRequestMethodKey.String("PUT") - // TRACE method - HTTPRequestMethodTrace = HTTPRequestMethodKey.String("TRACE") - // Any HTTP method that the instrumentation has no prior knowledge of - HTTPRequestMethodOther = HTTPRequestMethodKey.String("_OTHER") -) - -// HTTPRequestBodySize returns an attribute KeyValue conforming to the -// "http.request.body.size" semantic conventions. It represents the size of the -// request payload body in bytes. This is the number of bytes transferred -// excluding headers and is often, but not always, present as the -// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) -// header. For requests using transport encoding, this should be the compressed -// size. -func HTTPRequestBodySize(val int) attribute.KeyValue { - return HTTPRequestBodySizeKey.Int(val) -} - -// HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the -// "http.request.method_original" semantic conventions. 
It represents the -// original HTTP method sent by the client in the request line. -func HTTPRequestMethodOriginal(val string) attribute.KeyValue { - return HTTPRequestMethodOriginalKey.String(val) -} - -// HTTPRequestResendCount returns an attribute KeyValue conforming to the -// "http.request.resend_count" semantic conventions. It represents the ordinal -// number of request resending attempt (for any reason, including redirects). -func HTTPRequestResendCount(val int) attribute.KeyValue { - return HTTPRequestResendCountKey.Int(val) -} - -// HTTPRequestSize returns an attribute KeyValue conforming to the -// "http.request.size" semantic conventions. It represents the total size of -// the request in bytes. This should be the total number of bytes sent over the -// wire, including the request line (HTTP/1.1), framing (HTTP/2 and HTTP/3), -// headers, and request body if any. -func HTTPRequestSize(val int) attribute.KeyValue { - return HTTPRequestSizeKey.Int(val) -} - -// HTTPResponseBodySize returns an attribute KeyValue conforming to the -// "http.response.body.size" semantic conventions. It represents the size of -// the response payload body in bytes. This is the number of bytes transferred -// excluding headers and is often, but not always, present as the -// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) -// header. For requests using transport encoding, this should be the compressed -// size. -func HTTPResponseBodySize(val int) attribute.KeyValue { - return HTTPResponseBodySizeKey.Int(val) -} - -// HTTPResponseSize returns an attribute KeyValue conforming to the -// "http.response.size" semantic conventions. It represents the total size of -// the response in bytes. This should be the total number of bytes sent over -// the wire, including the status line (HTTP/1.1), framing (HTTP/2 and HTTP/3), -// headers, and response body and trailers if any. 
-func HTTPResponseSize(val int) attribute.KeyValue { - return HTTPResponseSizeKey.Int(val) -} - -// HTTPResponseStatusCode returns an attribute KeyValue conforming to the -// "http.response.status_code" semantic conventions. It represents the [HTTP -// response status code](https://tools.ietf.org/html/rfc7231#section-6). -func HTTPResponseStatusCode(val int) attribute.KeyValue { - return HTTPResponseStatusCodeKey.Int(val) -} - -// HTTPRoute returns an attribute KeyValue conforming to the "http.route" -// semantic conventions. It represents the matched route, that is, the path -// template in the format used by the respective server framework. -func HTTPRoute(val string) attribute.KeyValue { - return HTTPRouteKey.String(val) -} - -// Java Virtual machine related attributes. -const ( - // JvmBufferPoolNameKey is the attribute Key conforming to the - // "jvm.buffer.pool.name" semantic conventions. It represents the name of - // the buffer pool. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'mapped', 'direct' - // Note: Pool names are generally obtained via - // [BufferPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/BufferPoolMXBean.html#getName()). - JvmBufferPoolNameKey = attribute.Key("jvm.buffer.pool.name") - - // JvmGcActionKey is the attribute Key conforming to the "jvm.gc.action" - // semantic conventions. It represents the name of the garbage collector - // action. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'end of minor GC', 'end of major GC' - // Note: Garbage collector action is generally obtained via - // [GarbageCollectionNotificationInfo#getGcAction()](https://docs.oracle.com/en/java/javase/11/docs/api/jdk.management/com/sun/management/GarbageCollectionNotificationInfo.html#getGcAction()). 
- JvmGcActionKey = attribute.Key("jvm.gc.action") - - // JvmGcNameKey is the attribute Key conforming to the "jvm.gc.name" - // semantic conventions. It represents the name of the garbage collector. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'G1 Young Generation', 'G1 Old Generation' - // Note: Garbage collector name is generally obtained via - // [GarbageCollectionNotificationInfo#getGcName()](https://docs.oracle.com/en/java/javase/11/docs/api/jdk.management/com/sun/management/GarbageCollectionNotificationInfo.html#getGcName()). - JvmGcNameKey = attribute.Key("jvm.gc.name") - - // JvmMemoryPoolNameKey is the attribute Key conforming to the - // "jvm.memory.pool.name" semantic conventions. It represents the name of - // the memory pool. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'G1 Old Gen', 'G1 Eden space', 'G1 Survivor Space' - // Note: Pool names are generally obtained via - // [MemoryPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/MemoryPoolMXBean.html#getName()). - JvmMemoryPoolNameKey = attribute.Key("jvm.memory.pool.name") - - // JvmMemoryTypeKey is the attribute Key conforming to the - // "jvm.memory.type" semantic conventions. It represents the type of - // memory. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'heap', 'non_heap' - JvmMemoryTypeKey = attribute.Key("jvm.memory.type") - - // JvmThreadDaemonKey is the attribute Key conforming to the - // "jvm.thread.daemon" semantic conventions. It represents the whether the - // thread is daemon or not. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: stable - JvmThreadDaemonKey = attribute.Key("jvm.thread.daemon") - - // JvmThreadStateKey is the attribute Key conforming to the - // "jvm.thread.state" semantic conventions. It represents the state of the - // thread. 
- // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'runnable', 'blocked' - JvmThreadStateKey = attribute.Key("jvm.thread.state") -) - -var ( - // Heap memory - JvmMemoryTypeHeap = JvmMemoryTypeKey.String("heap") - // Non-heap memory - JvmMemoryTypeNonHeap = JvmMemoryTypeKey.String("non_heap") -) - -var ( - // A thread that has not yet started is in this state - JvmThreadStateNew = JvmThreadStateKey.String("new") - // A thread executing in the Java virtual machine is in this state - JvmThreadStateRunnable = JvmThreadStateKey.String("runnable") - // A thread that is blocked waiting for a monitor lock is in this state - JvmThreadStateBlocked = JvmThreadStateKey.String("blocked") - // A thread that is waiting indefinitely for another thread to perform a particular action is in this state - JvmThreadStateWaiting = JvmThreadStateKey.String("waiting") - // A thread that is waiting for another thread to perform an action for up to a specified waiting time is in this state - JvmThreadStateTimedWaiting = JvmThreadStateKey.String("timed_waiting") - // A thread that has exited is in this state - JvmThreadStateTerminated = JvmThreadStateKey.String("terminated") -) - -// JvmBufferPoolName returns an attribute KeyValue conforming to the -// "jvm.buffer.pool.name" semantic conventions. It represents the name of the -// buffer pool. -func JvmBufferPoolName(val string) attribute.KeyValue { - return JvmBufferPoolNameKey.String(val) -} - -// JvmGcAction returns an attribute KeyValue conforming to the -// "jvm.gc.action" semantic conventions. It represents the name of the garbage -// collector action. -func JvmGcAction(val string) attribute.KeyValue { - return JvmGcActionKey.String(val) -} - -// JvmGcName returns an attribute KeyValue conforming to the "jvm.gc.name" -// semantic conventions. It represents the name of the garbage collector. 
-func JvmGcName(val string) attribute.KeyValue { - return JvmGcNameKey.String(val) -} - -// JvmMemoryPoolName returns an attribute KeyValue conforming to the -// "jvm.memory.pool.name" semantic conventions. It represents the name of the -// memory pool. -func JvmMemoryPoolName(val string) attribute.KeyValue { - return JvmMemoryPoolNameKey.String(val) -} - -// JvmThreadDaemon returns an attribute KeyValue conforming to the -// "jvm.thread.daemon" semantic conventions. It represents the whether the -// thread is daemon or not. -func JvmThreadDaemon(val bool) attribute.KeyValue { - return JvmThreadDaemonKey.Bool(val) -} - -// Kubernetes resource attributes. -const ( - // K8SClusterNameKey is the attribute Key conforming to the - // "k8s.cluster.name" semantic conventions. It represents the name of the - // cluster. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry-cluster' - K8SClusterNameKey = attribute.Key("k8s.cluster.name") - - // K8SClusterUIDKey is the attribute Key conforming to the - // "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for - // the cluster, set to the UID of the `kube-system` namespace. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '218fc5a9-a5f1-4b54-aa05-46717d0ab26d' - // Note: K8S doesn't have support for obtaining a cluster ID. If this is - // ever - // added, we will recommend collecting the `k8s.cluster.uid` through the - // official APIs. In the meantime, we are able to use the `uid` of the - // `kube-system` namespace as a proxy for cluster ID. Read on for the - // rationale. - // - // Every object created in a K8S cluster is assigned a distinct UID. The - // `kube-system` namespace is used by Kubernetes itself and will exist - // for the lifetime of the cluster. 
Using the `uid` of the `kube-system` - // namespace is a reasonable proxy for the K8S ClusterID as it will only - // change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are - // UUIDs as standardized by - // [ISO/IEC 9834-8 and ITU-T - // X.667](https://www.itu.int/ITU-T/studygroups/com17/oid.html). - // Which states: - // - // > If generated according to one of the mechanisms defined in Rec. - // ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be - // different from all other UUIDs generated before 3603 A.D., or is - // extremely likely to be different (depending on the mechanism chosen). - // - // Therefore, UIDs between clusters should be extremely unlikely to - // conflict. - K8SClusterUIDKey = attribute.Key("k8s.cluster.uid") - - // K8SContainerNameKey is the attribute Key conforming to the - // "k8s.container.name" semantic conventions. It represents the name of the - // Container from Pod specification, must be unique within a Pod. Container - // runtime usually uses different globally unique name (`container.name`). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'redis' - K8SContainerNameKey = attribute.Key("k8s.container.name") - - // K8SContainerRestartCountKey is the attribute Key conforming to the - // "k8s.container.restart_count" semantic conventions. It represents the - // number of times the container was restarted. This attribute can be used - // to identify a particular container (running or stopped) within a - // container spec. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") - - // K8SContainerStatusLastTerminatedReasonKey is the attribute Key - // conforming to the "k8s.container.status.last_terminated_reason" semantic - // conventions. It represents the last terminated reason of the Container. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Evicted', 'Error' - K8SContainerStatusLastTerminatedReasonKey = attribute.Key("k8s.container.status.last_terminated_reason") - - // K8SCronJobNameKey is the attribute Key conforming to the - // "k8s.cronjob.name" semantic conventions. It represents the name of the - // CronJob. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry' - K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") - - // K8SCronJobUIDKey is the attribute Key conforming to the - // "k8s.cronjob.uid" semantic conventions. It represents the UID of the - // CronJob. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") - - // K8SDaemonSetNameKey is the attribute Key conforming to the - // "k8s.daemonset.name" semantic conventions. It represents the name of the - // DaemonSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry' - K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") - - // K8SDaemonSetUIDKey is the attribute Key conforming to the - // "k8s.daemonset.uid" semantic conventions. It represents the UID of the - // DaemonSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") - - // K8SDeploymentNameKey is the attribute Key conforming to the - // "k8s.deployment.name" semantic conventions. It represents the name of - // the Deployment. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry' - K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") - - // K8SDeploymentUIDKey is the attribute Key conforming to the - // "k8s.deployment.uid" semantic conventions. It represents the UID of the - // Deployment. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") - - // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" - // semantic conventions. It represents the name of the Job. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry' - K8SJobNameKey = attribute.Key("k8s.job.name") - - // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" - // semantic conventions. It represents the UID of the Job. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SJobUIDKey = attribute.Key("k8s.job.uid") - - // K8SNamespaceNameKey is the attribute Key conforming to the - // "k8s.namespace.name" semantic conventions. It represents the name of the - // namespace that the pod is running in. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'default' - K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") - - // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name" - // semantic conventions. It represents the name of the Node. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'node-1' - K8SNodeNameKey = attribute.Key("k8s.node.name") - - // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid" - // semantic conventions. It represents the UID of the Node. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' - K8SNodeUIDKey = attribute.Key("k8s.node.uid") - - // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name" - // semantic conventions. It represents the name of the Pod. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry-pod-autoconf' - K8SPodNameKey = attribute.Key("k8s.pod.name") - - // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid" - // semantic conventions. It represents the UID of the Pod. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SPodUIDKey = attribute.Key("k8s.pod.uid") - - // K8SReplicaSetNameKey is the attribute Key conforming to the - // "k8s.replicaset.name" semantic conventions. It represents the name of - // the ReplicaSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry' - K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") - - // K8SReplicaSetUIDKey is the attribute Key conforming to the - // "k8s.replicaset.uid" semantic conventions. It represents the UID of the - // ReplicaSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") - - // K8SStatefulSetNameKey is the attribute Key conforming to the - // "k8s.statefulset.name" semantic conventions. It represents the name of - // the StatefulSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry' - K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") - - // K8SStatefulSetUIDKey is the attribute Key conforming to the - // "k8s.statefulset.uid" semantic conventions. 
It represents the UID of the - // StatefulSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") -) - -// K8SClusterName returns an attribute KeyValue conforming to the -// "k8s.cluster.name" semantic conventions. It represents the name of the -// cluster. -func K8SClusterName(val string) attribute.KeyValue { - return K8SClusterNameKey.String(val) -} - -// K8SClusterUID returns an attribute KeyValue conforming to the -// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for the -// cluster, set to the UID of the `kube-system` namespace. -func K8SClusterUID(val string) attribute.KeyValue { - return K8SClusterUIDKey.String(val) -} - -// K8SContainerName returns an attribute KeyValue conforming to the -// "k8s.container.name" semantic conventions. It represents the name of the -// Container from Pod specification, must be unique within a Pod. Container -// runtime usually uses different globally unique name (`container.name`). -func K8SContainerName(val string) attribute.KeyValue { - return K8SContainerNameKey.String(val) -} - -// K8SContainerRestartCount returns an attribute KeyValue conforming to the -// "k8s.container.restart_count" semantic conventions. It represents the number -// of times the container was restarted. This attribute can be used to identify -// a particular container (running or stopped) within a container spec. -func K8SContainerRestartCount(val int) attribute.KeyValue { - return K8SContainerRestartCountKey.Int(val) -} - -// K8SContainerStatusLastTerminatedReason returns an attribute KeyValue -// conforming to the "k8s.container.status.last_terminated_reason" semantic -// conventions. It represents the last terminated reason of the Container. 
-func K8SContainerStatusLastTerminatedReason(val string) attribute.KeyValue { - return K8SContainerStatusLastTerminatedReasonKey.String(val) -} - -// K8SCronJobName returns an attribute KeyValue conforming to the -// "k8s.cronjob.name" semantic conventions. It represents the name of the -// CronJob. -func K8SCronJobName(val string) attribute.KeyValue { - return K8SCronJobNameKey.String(val) -} - -// K8SCronJobUID returns an attribute KeyValue conforming to the -// "k8s.cronjob.uid" semantic conventions. It represents the UID of the -// CronJob. -func K8SCronJobUID(val string) attribute.KeyValue { - return K8SCronJobUIDKey.String(val) -} - -// K8SDaemonSetName returns an attribute KeyValue conforming to the -// "k8s.daemonset.name" semantic conventions. It represents the name of the -// DaemonSet. -func K8SDaemonSetName(val string) attribute.KeyValue { - return K8SDaemonSetNameKey.String(val) -} - -// K8SDaemonSetUID returns an attribute KeyValue conforming to the -// "k8s.daemonset.uid" semantic conventions. It represents the UID of the -// DaemonSet. -func K8SDaemonSetUID(val string) attribute.KeyValue { - return K8SDaemonSetUIDKey.String(val) -} - -// K8SDeploymentName returns an attribute KeyValue conforming to the -// "k8s.deployment.name" semantic conventions. It represents the name of the -// Deployment. -func K8SDeploymentName(val string) attribute.KeyValue { - return K8SDeploymentNameKey.String(val) -} - -// K8SDeploymentUID returns an attribute KeyValue conforming to the -// "k8s.deployment.uid" semantic conventions. It represents the UID of the -// Deployment. -func K8SDeploymentUID(val string) attribute.KeyValue { - return K8SDeploymentUIDKey.String(val) -} - -// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name" -// semantic conventions. It represents the name of the Job. 
-func K8SJobName(val string) attribute.KeyValue { - return K8SJobNameKey.String(val) -} - -// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid" -// semantic conventions. It represents the UID of the Job. -func K8SJobUID(val string) attribute.KeyValue { - return K8SJobUIDKey.String(val) -} - -// K8SNamespaceName returns an attribute KeyValue conforming to the -// "k8s.namespace.name" semantic conventions. It represents the name of the -// namespace that the pod is running in. -func K8SNamespaceName(val string) attribute.KeyValue { - return K8SNamespaceNameKey.String(val) -} - -// K8SNodeName returns an attribute KeyValue conforming to the -// "k8s.node.name" semantic conventions. It represents the name of the Node. -func K8SNodeName(val string) attribute.KeyValue { - return K8SNodeNameKey.String(val) -} - -// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid" -// semantic conventions. It represents the UID of the Node. -func K8SNodeUID(val string) attribute.KeyValue { - return K8SNodeUIDKey.String(val) -} - -// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name" -// semantic conventions. It represents the name of the Pod. -func K8SPodName(val string) attribute.KeyValue { - return K8SPodNameKey.String(val) -} - -// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid" -// semantic conventions. It represents the UID of the Pod. -func K8SPodUID(val string) attribute.KeyValue { - return K8SPodUIDKey.String(val) -} - -// K8SReplicaSetName returns an attribute KeyValue conforming to the -// "k8s.replicaset.name" semantic conventions. It represents the name of the -// ReplicaSet. -func K8SReplicaSetName(val string) attribute.KeyValue { - return K8SReplicaSetNameKey.String(val) -} - -// K8SReplicaSetUID returns an attribute KeyValue conforming to the -// "k8s.replicaset.uid" semantic conventions. It represents the UID of the -// ReplicaSet. 
-func K8SReplicaSetUID(val string) attribute.KeyValue { - return K8SReplicaSetUIDKey.String(val) -} - -// K8SStatefulSetName returns an attribute KeyValue conforming to the -// "k8s.statefulset.name" semantic conventions. It represents the name of the -// StatefulSet. -func K8SStatefulSetName(val string) attribute.KeyValue { - return K8SStatefulSetNameKey.String(val) -} - -// K8SStatefulSetUID returns an attribute KeyValue conforming to the -// "k8s.statefulset.uid" semantic conventions. It represents the UID of the -// StatefulSet. -func K8SStatefulSetUID(val string) attribute.KeyValue { - return K8SStatefulSetUIDKey.String(val) -} - -// Log attributes -const ( - // LogIostreamKey is the attribute Key conforming to the "log.iostream" - // semantic conventions. It represents the stream associated with the log. - // See below for a list of well-known values. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - LogIostreamKey = attribute.Key("log.iostream") -) - -var ( - // Logs from stdout stream - LogIostreamStdout = LogIostreamKey.String("stdout") - // Events from stderr stream - LogIostreamStderr = LogIostreamKey.String("stderr") -) - -// Attributes for a file to which log was emitted. -const ( - // LogFileNameKey is the attribute Key conforming to the "log.file.name" - // semantic conventions. It represents the basename of the file. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'audit.log' - LogFileNameKey = attribute.Key("log.file.name") - - // LogFileNameResolvedKey is the attribute Key conforming to the - // "log.file.name_resolved" semantic conventions. It represents the - // basename of the file, with symlinks resolved. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'uuid.log' - LogFileNameResolvedKey = attribute.Key("log.file.name_resolved") - - // LogFilePathKey is the attribute Key conforming to the "log.file.path" - // semantic conventions. It represents the full path to the file. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/var/log/mysql/audit.log' - LogFilePathKey = attribute.Key("log.file.path") - - // LogFilePathResolvedKey is the attribute Key conforming to the - // "log.file.path_resolved" semantic conventions. It represents the full - // path to the file, with symlinks resolved. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/var/lib/docker/uuid.log' - LogFilePathResolvedKey = attribute.Key("log.file.path_resolved") -) - -// LogFileName returns an attribute KeyValue conforming to the -// "log.file.name" semantic conventions. It represents the basename of the -// file. -func LogFileName(val string) attribute.KeyValue { - return LogFileNameKey.String(val) -} - -// LogFileNameResolved returns an attribute KeyValue conforming to the -// "log.file.name_resolved" semantic conventions. It represents the basename of -// the file, with symlinks resolved. -func LogFileNameResolved(val string) attribute.KeyValue { - return LogFileNameResolvedKey.String(val) -} - -// LogFilePath returns an attribute KeyValue conforming to the -// "log.file.path" semantic conventions. It represents the full path to the -// file. -func LogFilePath(val string) attribute.KeyValue { - return LogFilePathKey.String(val) -} - -// LogFilePathResolved returns an attribute KeyValue conforming to the -// "log.file.path_resolved" semantic conventions. It represents the full path -// to the file, with symlinks resolved. 
-func LogFilePathResolved(val string) attribute.KeyValue { - return LogFilePathResolvedKey.String(val) -} - -// The generic attributes that may be used in any Log Record. -const ( - // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid" - // semantic conventions. It represents a unique identifier for the Log - // Record. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV' - // Note: If an id is provided, other log records with the same id will be - // considered duplicates and can be removed safely. This means, that two - // distinguishable log records MUST have different values. - // The id MAY be an [Universally Unique Lexicographically Sortable - // Identifier (ULID)](https://github.com/ulid/spec), but other identifiers - // (e.g. UUID) may be used as needed. - LogRecordUIDKey = attribute.Key("log.record.uid") -) - -// LogRecordUID returns an attribute KeyValue conforming to the -// "log.record.uid" semantic conventions. It represents a unique identifier for -// the Log Record. -func LogRecordUID(val string) attribute.KeyValue { - return LogRecordUIDKey.String(val) -} - -// Attributes describing telemetry around messaging systems and messaging -// activities. -const ( - // MessagingBatchMessageCountKey is the attribute Key conforming to the - // "messaging.batch.message_count" semantic conventions. It represents the - // number of messages sent, received, or processed in the scope of the - // batching operation. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 0, 1, 2 - // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on - // spans that operate with a single message. 
When a messaging client - // library supports both batch and single-message API for the same - // operation, instrumentations SHOULD use `messaging.batch.message_count` - // for batching APIs and SHOULD NOT use it for single-message APIs. - MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count") - - // MessagingClientIDKey is the attribute Key conforming to the - // "messaging.client.id" semantic conventions. It represents a unique - // identifier for the client that consumes or produces a message. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'client-5', 'myhost@8742@s8083jm' - MessagingClientIDKey = attribute.Key("messaging.client.id") - - // MessagingDestinationAnonymousKey is the attribute Key conforming to the - // "messaging.destination.anonymous" semantic conventions. It represents a - // boolean that is true if the message destination is anonymous (could be - // unnamed or have auto-generated name). - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous") - - // MessagingDestinationNameKey is the attribute Key conforming to the - // "messaging.destination.name" semantic conventions. It represents the - // message destination name - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'MyQueue', 'MyTopic' - // Note: Destination name SHOULD uniquely identify a specific queue, topic - // or other entity within the broker. If - // the broker doesn't have such notion, the destination name SHOULD - // uniquely identify the broker. - MessagingDestinationNameKey = attribute.Key("messaging.destination.name") - - // MessagingDestinationPartitionIDKey is the attribute Key conforming to - // the "messaging.destination.partition.id" semantic conventions. 
It - // represents the identifier of the partition messages are sent to or - // received from, unique within the `messaging.destination.name`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1' - MessagingDestinationPartitionIDKey = attribute.Key("messaging.destination.partition.id") - - // MessagingDestinationTemplateKey is the attribute Key conforming to the - // "messaging.destination.template" semantic conventions. It represents the - // low cardinality representation of the messaging destination name - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/customers/{customerID}' - // Note: Destination names could be constructed from templates. An example - // would be a destination name involving a user name or product id. - // Although the destination name in this case is of high cardinality, the - // underlying template is of low cardinality and can be effectively used - // for grouping and aggregation. - MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template") - - // MessagingDestinationTemporaryKey is the attribute Key conforming to the - // "messaging.destination.temporary" semantic conventions. It represents a - // boolean that is true if the message destination is temporary and might - // not exist anymore after messages are processed. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary") - - // MessagingDestinationPublishAnonymousKey is the attribute Key conforming - // to the "messaging.destination_publish.anonymous" semantic conventions. - // It represents a boolean that is true if the publish message destination - // is anonymous (could be unnamed or have auto-generated name). 
- // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - MessagingDestinationPublishAnonymousKey = attribute.Key("messaging.destination_publish.anonymous") - - // MessagingDestinationPublishNameKey is the attribute Key conforming to - // the "messaging.destination_publish.name" semantic conventions. It - // represents the name of the original destination the message was - // published to - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'MyQueue', 'MyTopic' - // Note: The name SHOULD uniquely identify a specific queue, topic, or - // other entity within the broker. If - // the broker doesn't have such notion, the original destination name - // SHOULD uniquely identify the broker. - MessagingDestinationPublishNameKey = attribute.Key("messaging.destination_publish.name") - - // MessagingMessageBodySizeKey is the attribute Key conforming to the - // "messaging.message.body.size" semantic conventions. It represents the - // size of the message body in bytes. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1439 - // Note: This can refer to both the compressed or uncompressed body size. - // If both sizes are known, the uncompressed - // body size should be used. - MessagingMessageBodySizeKey = attribute.Key("messaging.message.body.size") - - // MessagingMessageConversationIDKey is the attribute Key conforming to the - // "messaging.message.conversation_id" semantic conventions. It represents - // the conversation ID identifying the conversation to which the message - // belongs, represented as a string. Sometimes called "Correlation ID". 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'MyConversationID' - MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id") - - // MessagingMessageEnvelopeSizeKey is the attribute Key conforming to the - // "messaging.message.envelope.size" semantic conventions. It represents - // the size of the message body and metadata in bytes. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 2738 - // Note: This can refer to both the compressed or uncompressed size. If - // both sizes are known, the uncompressed - // size should be used. - MessagingMessageEnvelopeSizeKey = attribute.Key("messaging.message.envelope.size") - - // MessagingMessageIDKey is the attribute Key conforming to the - // "messaging.message.id" semantic conventions. It represents a value used - // by the messaging system as an identifier for the message, represented as - // a string. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '452a7c7c7c7048c2f887f61572b18fc2' - MessagingMessageIDKey = attribute.Key("messaging.message.id") - - // MessagingOperationNameKey is the attribute Key conforming to the - // "messaging.operation.name" semantic conventions. It represents the - // system-specific name of the messaging operation. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'ack', 'nack', 'send' - MessagingOperationNameKey = attribute.Key("messaging.operation.name") - - // MessagingOperationTypeKey is the attribute Key conforming to the - // "messaging.operation.type" semantic conventions. It represents a string - // identifying the type of the messaging operation. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Note: If a custom value is used, it MUST be of low cardinality. 
- MessagingOperationTypeKey = attribute.Key("messaging.operation.type") - - // MessagingSystemKey is the attribute Key conforming to the - // "messaging.system" semantic conventions. It represents the messaging - // system as identified by the client instrumentation. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Note: The actual messaging system may differ from the one known by the - // client. For example, when using Kafka client libraries to communicate - // with Azure Event Hubs, the `messaging.system` is set to `kafka` based on - // the instrumentation's best knowledge. - MessagingSystemKey = attribute.Key("messaging.system") -) - -var ( - // One or more messages are provided for publishing to an intermediary. If a single message is published, the context of the "Publish" span can be used as the creation context and no "Create" span needs to be created - MessagingOperationTypePublish = MessagingOperationTypeKey.String("publish") - // A message is created. "Create" spans always refer to a single message and are used to provide a unique creation context for messages in batch publishing scenarios - MessagingOperationTypeCreate = MessagingOperationTypeKey.String("create") - // One or more messages are requested by a consumer. 
This operation refers to pull-based scenarios, where consumers explicitly call methods of messaging SDKs to receive messages - MessagingOperationTypeReceive = MessagingOperationTypeKey.String("receive") - // One or more messages are delivered to or processed by a consumer - MessagingOperationTypeDeliver = MessagingOperationTypeKey.String("process") - // One or more messages are settled - MessagingOperationTypeSettle = MessagingOperationTypeKey.String("settle") -) - -var ( - // Apache ActiveMQ - MessagingSystemActivemq = MessagingSystemKey.String("activemq") - // Amazon Simple Queue Service (SQS) - MessagingSystemAWSSqs = MessagingSystemKey.String("aws_sqs") - // Azure Event Grid - MessagingSystemEventgrid = MessagingSystemKey.String("eventgrid") - // Azure Event Hubs - MessagingSystemEventhubs = MessagingSystemKey.String("eventhubs") - // Azure Service Bus - MessagingSystemServicebus = MessagingSystemKey.String("servicebus") - // Google Cloud Pub/Sub - MessagingSystemGCPPubsub = MessagingSystemKey.String("gcp_pubsub") - // Java Message Service - MessagingSystemJms = MessagingSystemKey.String("jms") - // Apache Kafka - MessagingSystemKafka = MessagingSystemKey.String("kafka") - // RabbitMQ - MessagingSystemRabbitmq = MessagingSystemKey.String("rabbitmq") - // Apache RocketMQ - MessagingSystemRocketmq = MessagingSystemKey.String("rocketmq") -) - -// MessagingBatchMessageCount returns an attribute KeyValue conforming to -// the "messaging.batch.message_count" semantic conventions. It represents the -// number of messages sent, received, or processed in the scope of the batching -// operation. -func MessagingBatchMessageCount(val int) attribute.KeyValue { - return MessagingBatchMessageCountKey.Int(val) -} - -// MessagingClientID returns an attribute KeyValue conforming to the -// "messaging.client.id" semantic conventions. It represents a unique -// identifier for the client that consumes or produces a message. 
-func MessagingClientID(val string) attribute.KeyValue { - return MessagingClientIDKey.String(val) -} - -// MessagingDestinationAnonymous returns an attribute KeyValue conforming to -// the "messaging.destination.anonymous" semantic conventions. It represents a -// boolean that is true if the message destination is anonymous (could be -// unnamed or have auto-generated name). -func MessagingDestinationAnonymous(val bool) attribute.KeyValue { - return MessagingDestinationAnonymousKey.Bool(val) -} - -// MessagingDestinationName returns an attribute KeyValue conforming to the -// "messaging.destination.name" semantic conventions. It represents the message -// destination name -func MessagingDestinationName(val string) attribute.KeyValue { - return MessagingDestinationNameKey.String(val) -} - -// MessagingDestinationPartitionID returns an attribute KeyValue conforming -// to the "messaging.destination.partition.id" semantic conventions. It -// represents the identifier of the partition messages are sent to or received -// from, unique within the `messaging.destination.name`. -func MessagingDestinationPartitionID(val string) attribute.KeyValue { - return MessagingDestinationPartitionIDKey.String(val) -} - -// MessagingDestinationTemplate returns an attribute KeyValue conforming to -// the "messaging.destination.template" semantic conventions. It represents the -// low cardinality representation of the messaging destination name -func MessagingDestinationTemplate(val string) attribute.KeyValue { - return MessagingDestinationTemplateKey.String(val) -} - -// MessagingDestinationTemporary returns an attribute KeyValue conforming to -// the "messaging.destination.temporary" semantic conventions. It represents a -// boolean that is true if the message destination is temporary and might not -// exist anymore after messages are processed. 
-func MessagingDestinationTemporary(val bool) attribute.KeyValue { - return MessagingDestinationTemporaryKey.Bool(val) -} - -// MessagingDestinationPublishAnonymous returns an attribute KeyValue -// conforming to the "messaging.destination_publish.anonymous" semantic -// conventions. It represents a boolean that is true if the publish message -// destination is anonymous (could be unnamed or have auto-generated name). -func MessagingDestinationPublishAnonymous(val bool) attribute.KeyValue { - return MessagingDestinationPublishAnonymousKey.Bool(val) -} - -// MessagingDestinationPublishName returns an attribute KeyValue conforming -// to the "messaging.destination_publish.name" semantic conventions. It -// represents the name of the original destination the message was published to -func MessagingDestinationPublishName(val string) attribute.KeyValue { - return MessagingDestinationPublishNameKey.String(val) -} - -// MessagingMessageBodySize returns an attribute KeyValue conforming to the -// "messaging.message.body.size" semantic conventions. It represents the size -// of the message body in bytes. -func MessagingMessageBodySize(val int) attribute.KeyValue { - return MessagingMessageBodySizeKey.Int(val) -} - -// MessagingMessageConversationID returns an attribute KeyValue conforming -// to the "messaging.message.conversation_id" semantic conventions. It -// represents the conversation ID identifying the conversation to which the -// message belongs, represented as a string. Sometimes called "Correlation ID". -func MessagingMessageConversationID(val string) attribute.KeyValue { - return MessagingMessageConversationIDKey.String(val) -} - -// MessagingMessageEnvelopeSize returns an attribute KeyValue conforming to -// the "messaging.message.envelope.size" semantic conventions. It represents -// the size of the message body and metadata in bytes. 
-func MessagingMessageEnvelopeSize(val int) attribute.KeyValue { - return MessagingMessageEnvelopeSizeKey.Int(val) -} - -// MessagingMessageID returns an attribute KeyValue conforming to the -// "messaging.message.id" semantic conventions. It represents a value used by -// the messaging system as an identifier for the message, represented as a -// string. -func MessagingMessageID(val string) attribute.KeyValue { - return MessagingMessageIDKey.String(val) -} - -// MessagingOperationName returns an attribute KeyValue conforming to the -// "messaging.operation.name" semantic conventions. It represents the -// system-specific name of the messaging operation. -func MessagingOperationName(val string) attribute.KeyValue { - return MessagingOperationNameKey.String(val) -} - -// This group describes attributes specific to Apache Kafka. -const ( - // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the - // "messaging.kafka.consumer.group" semantic conventions. It represents the - // name of the Kafka Consumer Group that is handling the message. Only - // applies to consumers, not producers. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'my-group' - MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group") - - // MessagingKafkaMessageKeyKey is the attribute Key conforming to the - // "messaging.kafka.message.key" semantic conventions. It represents the - // message keys in Kafka are used for grouping alike messages to ensure - // they're processed on the same partition. They differ from - // `messaging.message.id` in that they're not unique. If the key is `null`, - // the attribute MUST NOT be set. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myKey' - // Note: If the key type is not string, it's string representation has to - // be supplied for the attribute. 
If the key has no unambiguous, canonical - // string form, don't include its value. - MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key") - - // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the - // "messaging.kafka.message.offset" semantic conventions. It represents the - // offset of a record in the corresponding Kafka partition. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 42 - MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset") - - // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the - // "messaging.kafka.message.tombstone" semantic conventions. It represents - // a boolean that is true if the message is a tombstone. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone") -) - -// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to -// the "messaging.kafka.consumer.group" semantic conventions. It represents the -// name of the Kafka Consumer Group that is handling the message. Only applies -// to consumers, not producers. -func MessagingKafkaConsumerGroup(val string) attribute.KeyValue { - return MessagingKafkaConsumerGroupKey.String(val) -} - -// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the -// "messaging.kafka.message.key" semantic conventions. It represents the -// message keys in Kafka are used for grouping alike messages to ensure they're -// processed on the same partition. They differ from `messaging.message.id` in -// that they're not unique. If the key is `null`, the attribute MUST NOT be -// set. 
-func MessagingKafkaMessageKey(val string) attribute.KeyValue { - return MessagingKafkaMessageKeyKey.String(val) -} - -// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to -// the "messaging.kafka.message.offset" semantic conventions. It represents the -// offset of a record in the corresponding Kafka partition. -func MessagingKafkaMessageOffset(val int) attribute.KeyValue { - return MessagingKafkaMessageOffsetKey.Int(val) -} - -// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming -// to the "messaging.kafka.message.tombstone" semantic conventions. It -// represents a boolean that is true if the message is a tombstone. -func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue { - return MessagingKafkaMessageTombstoneKey.Bool(val) -} - -// This group describes attributes specific to RabbitMQ. -const ( - // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key - // conforming to the "messaging.rabbitmq.destination.routing_key" semantic - // conventions. It represents the rabbitMQ message routing key. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myKey' - MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key") - - // MessagingRabbitmqMessageDeliveryTagKey is the attribute Key conforming - // to the "messaging.rabbitmq.message.delivery_tag" semantic conventions. - // It represents the rabbitMQ message delivery tag - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 123 - MessagingRabbitmqMessageDeliveryTagKey = attribute.Key("messaging.rabbitmq.message.delivery_tag") -) - -// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue -// conforming to the "messaging.rabbitmq.destination.routing_key" semantic -// conventions. It represents the rabbitMQ message routing key. 
-func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue { - return MessagingRabbitmqDestinationRoutingKeyKey.String(val) -} - -// MessagingRabbitmqMessageDeliveryTag returns an attribute KeyValue -// conforming to the "messaging.rabbitmq.message.delivery_tag" semantic -// conventions. It represents the rabbitMQ message delivery tag -func MessagingRabbitmqMessageDeliveryTag(val int) attribute.KeyValue { - return MessagingRabbitmqMessageDeliveryTagKey.Int(val) -} - -// This group describes attributes specific to RocketMQ. -const ( - // MessagingRocketmqClientGroupKey is the attribute Key conforming to the - // "messaging.rocketmq.client_group" semantic conventions. It represents - // the name of the RocketMQ producer/consumer group that is handling the - // message. The client type is identified by the SpanKind. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myConsumerGroup' - MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group") - - // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to - // the "messaging.rocketmq.consumption_model" semantic conventions. It - // represents the model of message consumption. This only applies to - // consumer spans. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model") - - // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key - // conforming to the "messaging.rocketmq.message.delay_time_level" semantic - // conventions. It represents the delay time level for delay message, which - // determines the message delay time. 
- // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 3 - MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level") - - // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key - // conforming to the "messaging.rocketmq.message.delivery_timestamp" - // semantic conventions. It represents the timestamp in milliseconds that - // the delay message is expected to be delivered to consumer. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1665987217045 - MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp") - - // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the - // "messaging.rocketmq.message.group" semantic conventions. It represents - // the it is essential for FIFO message. Messages that belong to the same - // message group are always processed one by one within the same consumer - // group. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myMessageGroup' - MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group") - - // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the - // "messaging.rocketmq.message.keys" semantic conventions. It represents - // the key(s) of message, another way to mark message besides message id. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'keyA', 'keyB' - MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys") - - // MessagingRocketmqMessageTagKey is the attribute Key conforming to the - // "messaging.rocketmq.message.tag" semantic conventions. It represents the - // secondary classifier of message besides topic. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'tagA' - MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag") - - // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the - // "messaging.rocketmq.message.type" semantic conventions. It represents - // the type of message. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type") - - // MessagingRocketmqNamespaceKey is the attribute Key conforming to the - // "messaging.rocketmq.namespace" semantic conventions. It represents the - // namespace of RocketMQ resources, resources in different namespaces are - // individual. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myNamespace' - MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace") -) - -var ( - // Clustering consumption model - MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering") - // Broadcasting consumption model - MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting") -) - -var ( - // Normal message - MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal") - // FIFO message - MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo") - // Delay message - MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay") - // Transaction message - MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction") -) - -// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to -// the "messaging.rocketmq.client_group" semantic conventions. It represents -// the name of the RocketMQ producer/consumer group that is handling the -// message. 
The client type is identified by the SpanKind. -func MessagingRocketmqClientGroup(val string) attribute.KeyValue { - return MessagingRocketmqClientGroupKey.String(val) -} - -// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue -// conforming to the "messaging.rocketmq.message.delay_time_level" semantic -// conventions. It represents the delay time level for delay message, which -// determines the message delay time. -func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue { - return MessagingRocketmqMessageDelayTimeLevelKey.Int(val) -} - -// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue -// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic -// conventions. It represents the timestamp in milliseconds that the delay -// message is expected to be delivered to consumer. -func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue { - return MessagingRocketmqMessageDeliveryTimestampKey.Int(val) -} - -// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to -// the "messaging.rocketmq.message.group" semantic conventions. It represents -// the it is essential for FIFO message. Messages that belong to the same -// message group are always processed one by one within the same consumer -// group. -func MessagingRocketmqMessageGroup(val string) attribute.KeyValue { - return MessagingRocketmqMessageGroupKey.String(val) -} - -// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to -// the "messaging.rocketmq.message.keys" semantic conventions. It represents -// the key(s) of message, another way to mark message besides message id. -func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue { - return MessagingRocketmqMessageKeysKey.StringSlice(val) -} - -// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to -// the "messaging.rocketmq.message.tag" semantic conventions. 
It represents the -// secondary classifier of message besides topic. -func MessagingRocketmqMessageTag(val string) attribute.KeyValue { - return MessagingRocketmqMessageTagKey.String(val) -} - -// MessagingRocketmqNamespace returns an attribute KeyValue conforming to -// the "messaging.rocketmq.namespace" semantic conventions. It represents the -// namespace of RocketMQ resources, resources in different namespaces are -// individual. -func MessagingRocketmqNamespace(val string) attribute.KeyValue { - return MessagingRocketmqNamespaceKey.String(val) -} - -// This group describes attributes specific to GCP Pub/Sub. -const ( - // MessagingGCPPubsubMessageAckDeadlineKey is the attribute Key conforming - // to the "messaging.gcp_pubsub.message.ack_deadline" semantic conventions. - // It represents the ack deadline in seconds set for the modify ack - // deadline request. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 10 - MessagingGCPPubsubMessageAckDeadlineKey = attribute.Key("messaging.gcp_pubsub.message.ack_deadline") - - // MessagingGCPPubsubMessageAckIDKey is the attribute Key conforming to the - // "messaging.gcp_pubsub.message.ack_id" semantic conventions. It - // represents the ack id for a given message. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'ack_id' - MessagingGCPPubsubMessageAckIDKey = attribute.Key("messaging.gcp_pubsub.message.ack_id") - - // MessagingGCPPubsubMessageDeliveryAttemptKey is the attribute Key - // conforming to the "messaging.gcp_pubsub.message.delivery_attempt" - // semantic conventions. It represents the delivery attempt for a given - // message. 
- // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 2 - MessagingGCPPubsubMessageDeliveryAttemptKey = attribute.Key("messaging.gcp_pubsub.message.delivery_attempt") - - // MessagingGCPPubsubMessageOrderingKeyKey is the attribute Key conforming - // to the "messaging.gcp_pubsub.message.ordering_key" semantic conventions. - // It represents the ordering key for a given message. If the attribute is - // not present, the message does not have an ordering key. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'ordering_key' - MessagingGCPPubsubMessageOrderingKeyKey = attribute.Key("messaging.gcp_pubsub.message.ordering_key") -) - -// MessagingGCPPubsubMessageAckDeadline returns an attribute KeyValue -// conforming to the "messaging.gcp_pubsub.message.ack_deadline" semantic -// conventions. It represents the ack deadline in seconds set for the modify -// ack deadline request. -func MessagingGCPPubsubMessageAckDeadline(val int) attribute.KeyValue { - return MessagingGCPPubsubMessageAckDeadlineKey.Int(val) -} - -// MessagingGCPPubsubMessageAckID returns an attribute KeyValue conforming -// to the "messaging.gcp_pubsub.message.ack_id" semantic conventions. It -// represents the ack id for a given message. -func MessagingGCPPubsubMessageAckID(val string) attribute.KeyValue { - return MessagingGCPPubsubMessageAckIDKey.String(val) -} - -// MessagingGCPPubsubMessageDeliveryAttempt returns an attribute KeyValue -// conforming to the "messaging.gcp_pubsub.message.delivery_attempt" semantic -// conventions. It represents the delivery attempt for a given message. -func MessagingGCPPubsubMessageDeliveryAttempt(val int) attribute.KeyValue { - return MessagingGCPPubsubMessageDeliveryAttemptKey.Int(val) -} - -// MessagingGCPPubsubMessageOrderingKey returns an attribute KeyValue -// conforming to the "messaging.gcp_pubsub.message.ordering_key" semantic -// conventions. 
It represents the ordering key for a given message. If the -// attribute is not present, the message does not have an ordering key. -func MessagingGCPPubsubMessageOrderingKey(val string) attribute.KeyValue { - return MessagingGCPPubsubMessageOrderingKeyKey.String(val) -} - -// This group describes attributes specific to Azure Service Bus. -const ( - // MessagingServicebusDestinationSubscriptionNameKey is the attribute Key - // conforming to the "messaging.servicebus.destination.subscription_name" - // semantic conventions. It represents the name of the subscription in the - // topic messages are received from. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'mySubscription' - MessagingServicebusDestinationSubscriptionNameKey = attribute.Key("messaging.servicebus.destination.subscription_name") - - // MessagingServicebusDispositionStatusKey is the attribute Key conforming - // to the "messaging.servicebus.disposition_status" semantic conventions. - // It represents the describes the [settlement - // type](https://learn.microsoft.com/azure/service-bus-messaging/message-transfers-locks-settlement#peeklock). - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - MessagingServicebusDispositionStatusKey = attribute.Key("messaging.servicebus.disposition_status") - - // MessagingServicebusMessageDeliveryCountKey is the attribute Key - // conforming to the "messaging.servicebus.message.delivery_count" semantic - // conventions. It represents the number of deliveries that have been - // attempted for this message. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 2 - MessagingServicebusMessageDeliveryCountKey = attribute.Key("messaging.servicebus.message.delivery_count") - - // MessagingServicebusMessageEnqueuedTimeKey is the attribute Key - // conforming to the "messaging.servicebus.message.enqueued_time" semantic - // conventions. 
It represents the UTC epoch seconds at which the message - // has been accepted and stored in the entity. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1701393730 - MessagingServicebusMessageEnqueuedTimeKey = attribute.Key("messaging.servicebus.message.enqueued_time") -) - -var ( - // Message is completed - MessagingServicebusDispositionStatusComplete = MessagingServicebusDispositionStatusKey.String("complete") - // Message is abandoned - MessagingServicebusDispositionStatusAbandon = MessagingServicebusDispositionStatusKey.String("abandon") - // Message is sent to dead letter queue - MessagingServicebusDispositionStatusDeadLetter = MessagingServicebusDispositionStatusKey.String("dead_letter") - // Message is deferred - MessagingServicebusDispositionStatusDefer = MessagingServicebusDispositionStatusKey.String("defer") -) - -// MessagingServicebusDestinationSubscriptionName returns an attribute -// KeyValue conforming to the -// "messaging.servicebus.destination.subscription_name" semantic conventions. -// It represents the name of the subscription in the topic messages are -// received from. -func MessagingServicebusDestinationSubscriptionName(val string) attribute.KeyValue { - return MessagingServicebusDestinationSubscriptionNameKey.String(val) -} - -// MessagingServicebusMessageDeliveryCount returns an attribute KeyValue -// conforming to the "messaging.servicebus.message.delivery_count" semantic -// conventions. It represents the number of deliveries that have been attempted -// for this message. -func MessagingServicebusMessageDeliveryCount(val int) attribute.KeyValue { - return MessagingServicebusMessageDeliveryCountKey.Int(val) -} - -// MessagingServicebusMessageEnqueuedTime returns an attribute KeyValue -// conforming to the "messaging.servicebus.message.enqueued_time" semantic -// conventions. It represents the UTC epoch seconds at which the message has -// been accepted and stored in the entity. 
-func MessagingServicebusMessageEnqueuedTime(val int) attribute.KeyValue { - return MessagingServicebusMessageEnqueuedTimeKey.Int(val) -} - -// This group describes attributes specific to Azure Event Hubs. -const ( - // MessagingEventhubsConsumerGroupKey is the attribute Key conforming to - // the "messaging.eventhubs.consumer.group" semantic conventions. It - // represents the name of the consumer group the event consumer is - // associated with. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'indexer' - MessagingEventhubsConsumerGroupKey = attribute.Key("messaging.eventhubs.consumer.group") - - // MessagingEventhubsMessageEnqueuedTimeKey is the attribute Key conforming - // to the "messaging.eventhubs.message.enqueued_time" semantic conventions. - // It represents the UTC epoch seconds at which the message has been - // accepted and stored in the entity. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1701393730 - MessagingEventhubsMessageEnqueuedTimeKey = attribute.Key("messaging.eventhubs.message.enqueued_time") -) - -// MessagingEventhubsConsumerGroup returns an attribute KeyValue conforming -// to the "messaging.eventhubs.consumer.group" semantic conventions. It -// represents the name of the consumer group the event consumer is associated -// with. -func MessagingEventhubsConsumerGroup(val string) attribute.KeyValue { - return MessagingEventhubsConsumerGroupKey.String(val) -} - -// MessagingEventhubsMessageEnqueuedTime returns an attribute KeyValue -// conforming to the "messaging.eventhubs.message.enqueued_time" semantic -// conventions. It represents the UTC epoch seconds at which the message has -// been accepted and stored in the entity. -func MessagingEventhubsMessageEnqueuedTime(val int) attribute.KeyValue { - return MessagingEventhubsMessageEnqueuedTimeKey.Int(val) -} - -// These attributes may be used for any network related operation. 
-const ( - // NetworkCarrierIccKey is the attribute Key conforming to the - // "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 - // alpha-2 2-character country code associated with the mobile carrier - // network. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'DE' - NetworkCarrierIccKey = attribute.Key("network.carrier.icc") - - // NetworkCarrierMccKey is the attribute Key conforming to the - // "network.carrier.mcc" semantic conventions. It represents the mobile - // carrier country code. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '310' - NetworkCarrierMccKey = attribute.Key("network.carrier.mcc") - - // NetworkCarrierMncKey is the attribute Key conforming to the - // "network.carrier.mnc" semantic conventions. It represents the mobile - // carrier network code. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '001' - NetworkCarrierMncKey = attribute.Key("network.carrier.mnc") - - // NetworkCarrierNameKey is the attribute Key conforming to the - // "network.carrier.name" semantic conventions. It represents the name of - // the mobile carrier. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'sprint' - NetworkCarrierNameKey = attribute.Key("network.carrier.name") - - // NetworkConnectionSubtypeKey is the attribute Key conforming to the - // "network.connection.subtype" semantic conventions. It represents the - // this describes more details regarding the connection.type. It may be the - // type of cell technology connection, but it could be used for describing - // details about a wifi connection. 
- // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'LTE' - NetworkConnectionSubtypeKey = attribute.Key("network.connection.subtype") - - // NetworkConnectionTypeKey is the attribute Key conforming to the - // "network.connection.type" semantic conventions. It represents the - // internet connection type. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'wifi' - NetworkConnectionTypeKey = attribute.Key("network.connection.type") - - // NetworkIoDirectionKey is the attribute Key conforming to the - // "network.io.direction" semantic conventions. It represents the network - // IO operation direction. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'transmit' - NetworkIoDirectionKey = attribute.Key("network.io.direction") - - // NetworkLocalAddressKey is the attribute Key conforming to the - // "network.local.address" semantic conventions. It represents the local - // address of the network connection - IP address or Unix domain socket - // name. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '10.1.2.80', '/tmp/my.sock' - NetworkLocalAddressKey = attribute.Key("network.local.address") - - // NetworkLocalPortKey is the attribute Key conforming to the - // "network.local.port" semantic conventions. It represents the local port - // number of the network connection. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 65123 - NetworkLocalPortKey = attribute.Key("network.local.port") - - // NetworkPeerAddressKey is the attribute Key conforming to the - // "network.peer.address" semantic conventions. It represents the peer - // address of the network connection - IP address or Unix domain socket - // name. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '10.1.2.80', '/tmp/my.sock' - NetworkPeerAddressKey = attribute.Key("network.peer.address") - - // NetworkPeerPortKey is the attribute Key conforming to the - // "network.peer.port" semantic conventions. It represents the peer port - // number of the network connection. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 65123 - NetworkPeerPortKey = attribute.Key("network.peer.port") - - // NetworkProtocolNameKey is the attribute Key conforming to the - // "network.protocol.name" semantic conventions. It represents the [OSI - // application layer](https://osi-model.com/application-layer/) or non-OSI - // equivalent. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'amqp', 'http', 'mqtt' - // Note: The value SHOULD be normalized to lowercase. - NetworkProtocolNameKey = attribute.Key("network.protocol.name") - - // NetworkProtocolVersionKey is the attribute Key conforming to the - // "network.protocol.version" semantic conventions. It represents the - // actual version of the protocol used for network communication. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '1.1', '2' - // Note: If protocol version is subject to negotiation (for example using - // [ALPN](https://www.rfc-editor.org/rfc/rfc7301.html)), this attribute - // SHOULD be set to the negotiated version. If the actual protocol version - // is not known, this attribute SHOULD NOT be set. - NetworkProtocolVersionKey = attribute.Key("network.protocol.version") - - // NetworkTransportKey is the attribute Key conforming to the - // "network.transport" semantic conventions. It represents the [OSI - // transport layer](https://osi-model.com/transport-layer/) or - // [inter-process communication - // method](https://wikipedia.org/wiki/Inter-process_communication). 
- // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'tcp', 'udp' - // Note: The value SHOULD be normalized to lowercase. - // - // Consider always setting the transport when setting a port number, since - // a port number is ambiguous without knowing the transport. For example - // different processes could be listening on TCP port 12345 and UDP port - // 12345. - NetworkTransportKey = attribute.Key("network.transport") - - // NetworkTypeKey is the attribute Key conforming to the "network.type" - // semantic conventions. It represents the [OSI network - // layer](https://osi-model.com/network-layer/) or non-OSI equivalent. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'ipv4', 'ipv6' - // Note: The value SHOULD be normalized to lowercase. - NetworkTypeKey = attribute.Key("network.type") -) - -var ( - // GPRS - NetworkConnectionSubtypeGprs = NetworkConnectionSubtypeKey.String("gprs") - // EDGE - NetworkConnectionSubtypeEdge = NetworkConnectionSubtypeKey.String("edge") - // UMTS - NetworkConnectionSubtypeUmts = NetworkConnectionSubtypeKey.String("umts") - // CDMA - NetworkConnectionSubtypeCdma = NetworkConnectionSubtypeKey.String("cdma") - // EVDO Rel. 0 - NetworkConnectionSubtypeEvdo0 = NetworkConnectionSubtypeKey.String("evdo_0") - // EVDO Rev. A - NetworkConnectionSubtypeEvdoA = NetworkConnectionSubtypeKey.String("evdo_a") - // CDMA2000 1XRTT - NetworkConnectionSubtypeCdma20001xrtt = NetworkConnectionSubtypeKey.String("cdma2000_1xrtt") - // HSDPA - NetworkConnectionSubtypeHsdpa = NetworkConnectionSubtypeKey.String("hsdpa") - // HSUPA - NetworkConnectionSubtypeHsupa = NetworkConnectionSubtypeKey.String("hsupa") - // HSPA - NetworkConnectionSubtypeHspa = NetworkConnectionSubtypeKey.String("hspa") - // IDEN - NetworkConnectionSubtypeIden = NetworkConnectionSubtypeKey.String("iden") - // EVDO Rev. 
B - NetworkConnectionSubtypeEvdoB = NetworkConnectionSubtypeKey.String("evdo_b") - // LTE - NetworkConnectionSubtypeLte = NetworkConnectionSubtypeKey.String("lte") - // EHRPD - NetworkConnectionSubtypeEhrpd = NetworkConnectionSubtypeKey.String("ehrpd") - // HSPAP - NetworkConnectionSubtypeHspap = NetworkConnectionSubtypeKey.String("hspap") - // GSM - NetworkConnectionSubtypeGsm = NetworkConnectionSubtypeKey.String("gsm") - // TD-SCDMA - NetworkConnectionSubtypeTdScdma = NetworkConnectionSubtypeKey.String("td_scdma") - // IWLAN - NetworkConnectionSubtypeIwlan = NetworkConnectionSubtypeKey.String("iwlan") - // 5G NR (New Radio) - NetworkConnectionSubtypeNr = NetworkConnectionSubtypeKey.String("nr") - // 5G NRNSA (New Radio Non-Standalone) - NetworkConnectionSubtypeNrnsa = NetworkConnectionSubtypeKey.String("nrnsa") - // LTE CA - NetworkConnectionSubtypeLteCa = NetworkConnectionSubtypeKey.String("lte_ca") -) - -var ( - // wifi - NetworkConnectionTypeWifi = NetworkConnectionTypeKey.String("wifi") - // wired - NetworkConnectionTypeWired = NetworkConnectionTypeKey.String("wired") - // cell - NetworkConnectionTypeCell = NetworkConnectionTypeKey.String("cell") - // unavailable - NetworkConnectionTypeUnavailable = NetworkConnectionTypeKey.String("unavailable") - // unknown - NetworkConnectionTypeUnknown = NetworkConnectionTypeKey.String("unknown") -) - -var ( - // transmit - NetworkIoDirectionTransmit = NetworkIoDirectionKey.String("transmit") - // receive - NetworkIoDirectionReceive = NetworkIoDirectionKey.String("receive") -) - -var ( - // TCP - NetworkTransportTCP = NetworkTransportKey.String("tcp") - // UDP - NetworkTransportUDP = NetworkTransportKey.String("udp") - // Named or anonymous pipe - NetworkTransportPipe = NetworkTransportKey.String("pipe") - // Unix domain socket - NetworkTransportUnix = NetworkTransportKey.String("unix") -) - -var ( - // IPv4 - NetworkTypeIpv4 = NetworkTypeKey.String("ipv4") - // IPv6 - NetworkTypeIpv6 = NetworkTypeKey.String("ipv6") -) - 
-// NetworkCarrierIcc returns an attribute KeyValue conforming to the -// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 -// alpha-2 2-character country code associated with the mobile carrier network. -func NetworkCarrierIcc(val string) attribute.KeyValue { - return NetworkCarrierIccKey.String(val) -} - -// NetworkCarrierMcc returns an attribute KeyValue conforming to the -// "network.carrier.mcc" semantic conventions. It represents the mobile carrier -// country code. -func NetworkCarrierMcc(val string) attribute.KeyValue { - return NetworkCarrierMccKey.String(val) -} - -// NetworkCarrierMnc returns an attribute KeyValue conforming to the -// "network.carrier.mnc" semantic conventions. It represents the mobile carrier -// network code. -func NetworkCarrierMnc(val string) attribute.KeyValue { - return NetworkCarrierMncKey.String(val) -} - -// NetworkCarrierName returns an attribute KeyValue conforming to the -// "network.carrier.name" semantic conventions. It represents the name of the -// mobile carrier. -func NetworkCarrierName(val string) attribute.KeyValue { - return NetworkCarrierNameKey.String(val) -} - -// NetworkLocalAddress returns an attribute KeyValue conforming to the -// "network.local.address" semantic conventions. It represents the local -// address of the network connection - IP address or Unix domain socket name. -func NetworkLocalAddress(val string) attribute.KeyValue { - return NetworkLocalAddressKey.String(val) -} - -// NetworkLocalPort returns an attribute KeyValue conforming to the -// "network.local.port" semantic conventions. It represents the local port -// number of the network connection. -func NetworkLocalPort(val int) attribute.KeyValue { - return NetworkLocalPortKey.Int(val) -} - -// NetworkPeerAddress returns an attribute KeyValue conforming to the -// "network.peer.address" semantic conventions. It represents the peer address -// of the network connection - IP address or Unix domain socket name. 
-func NetworkPeerAddress(val string) attribute.KeyValue { - return NetworkPeerAddressKey.String(val) -} - -// NetworkPeerPort returns an attribute KeyValue conforming to the -// "network.peer.port" semantic conventions. It represents the peer port number -// of the network connection. -func NetworkPeerPort(val int) attribute.KeyValue { - return NetworkPeerPortKey.Int(val) -} - -// NetworkProtocolName returns an attribute KeyValue conforming to the -// "network.protocol.name" semantic conventions. It represents the [OSI -// application layer](https://osi-model.com/application-layer/) or non-OSI -// equivalent. -func NetworkProtocolName(val string) attribute.KeyValue { - return NetworkProtocolNameKey.String(val) -} - -// NetworkProtocolVersion returns an attribute KeyValue conforming to the -// "network.protocol.version" semantic conventions. It represents the actual -// version of the protocol used for network communication. -func NetworkProtocolVersion(val string) attribute.KeyValue { - return NetworkProtocolVersionKey.String(val) -} - -// An OCI image manifest. -const ( - // OciManifestDigestKey is the attribute Key conforming to the - // "oci.manifest.digest" semantic conventions. It represents the digest of - // the OCI image manifest. For container images specifically is the digest - // by which the container image is known. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4' - // Note: Follows [OCI Image Manifest - // Specification](https://github.com/opencontainers/image-spec/blob/main/manifest.md), - // and specifically the [Digest - // property](https://github.com/opencontainers/image-spec/blob/main/descriptor.md#digests). - // An example can be found in [Example Image - // Manifest](https://docs.docker.com/registry/spec/manifest-v2-2/#example-image-manifest). 
- OciManifestDigestKey = attribute.Key("oci.manifest.digest") -) - -// OciManifestDigest returns an attribute KeyValue conforming to the -// "oci.manifest.digest" semantic conventions. It represents the digest of the -// OCI image manifest. For container images specifically is the digest by which -// the container image is known. -func OciManifestDigest(val string) attribute.KeyValue { - return OciManifestDigestKey.String(val) -} - -// Attributes used by the OpenTracing Shim layer. -const ( - // OpentracingRefTypeKey is the attribute Key conforming to the - // "opentracing.ref_type" semantic conventions. It represents the - // parent-child Reference type - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Note: The causal relationship between a child Span and a parent Span. - OpentracingRefTypeKey = attribute.Key("opentracing.ref_type") -) - -var ( - // The parent Span depends on the child Span in some capacity - OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of") - // The parent Span doesn't depend in any way on the result of the child Span - OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from") -) - -// The operating system (OS) on which the process represented by this resource -// is running. -const ( - // OSBuildIDKey is the attribute Key conforming to the "os.build_id" - // semantic conventions. It represents the unique identifier for a - // particular build or compilation of the operating system. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'TQ3C.230805.001.B2', '20E247', '22621' - OSBuildIDKey = attribute.Key("os.build_id") - - // OSDescriptionKey is the attribute Key conforming to the "os.description" - // semantic conventions. It represents the human readable (not intended to - // be parsed) OS version information, like e.g. reported by `ver` or - // `lsb_release -a` commands. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 - // LTS' - OSDescriptionKey = attribute.Key("os.description") - - // OSNameKey is the attribute Key conforming to the "os.name" semantic - // conventions. It represents the human readable operating system name. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'iOS', 'Android', 'Ubuntu' - OSNameKey = attribute.Key("os.name") - - // OSTypeKey is the attribute Key conforming to the "os.type" semantic - // conventions. It represents the operating system type. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - OSTypeKey = attribute.Key("os.type") - - // OSVersionKey is the attribute Key conforming to the "os.version" - // semantic conventions. It represents the version string of the operating - // system as defined in [Version - // Attributes](/docs/resource/README.md#version-attributes). 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '14.2.1', '18.04.1' - OSVersionKey = attribute.Key("os.version") -) - -var ( - // Microsoft Windows - OSTypeWindows = OSTypeKey.String("windows") - // Linux - OSTypeLinux = OSTypeKey.String("linux") - // Apple Darwin - OSTypeDarwin = OSTypeKey.String("darwin") - // FreeBSD - OSTypeFreeBSD = OSTypeKey.String("freebsd") - // NetBSD - OSTypeNetBSD = OSTypeKey.String("netbsd") - // OpenBSD - OSTypeOpenBSD = OSTypeKey.String("openbsd") - // DragonFly BSD - OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") - // HP-UX (Hewlett Packard Unix) - OSTypeHPUX = OSTypeKey.String("hpux") - // AIX (Advanced Interactive eXecutive) - OSTypeAIX = OSTypeKey.String("aix") - // SunOS, Oracle Solaris - OSTypeSolaris = OSTypeKey.String("solaris") - // IBM z/OS - OSTypeZOS = OSTypeKey.String("z_os") -) - -// OSBuildID returns an attribute KeyValue conforming to the "os.build_id" -// semantic conventions. It represents the unique identifier for a particular -// build or compilation of the operating system. -func OSBuildID(val string) attribute.KeyValue { - return OSBuildIDKey.String(val) -} - -// OSDescription returns an attribute KeyValue conforming to the -// "os.description" semantic conventions. It represents the human readable (not -// intended to be parsed) OS version information, like e.g. reported by `ver` -// or `lsb_release -a` commands. -func OSDescription(val string) attribute.KeyValue { - return OSDescriptionKey.String(val) -} - -// OSName returns an attribute KeyValue conforming to the "os.name" semantic -// conventions. It represents the human readable operating system name. -func OSName(val string) attribute.KeyValue { - return OSNameKey.String(val) -} - -// OSVersion returns an attribute KeyValue conforming to the "os.version" -// semantic conventions. 
It represents the version string of the operating -// system as defined in [Version -// Attributes](/docs/resource/README.md#version-attributes). -func OSVersion(val string) attribute.KeyValue { - return OSVersionKey.String(val) -} - -// Attributes reserved for OpenTelemetry -const ( - // OTelStatusCodeKey is the attribute Key conforming to the - // "otel.status_code" semantic conventions. It represents the name of the - // code, either "OK" or "ERROR". MUST NOT be set if the status code is - // UNSET. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - OTelStatusCodeKey = attribute.Key("otel.status_code") - - // OTelStatusDescriptionKey is the attribute Key conforming to the - // "otel.status_description" semantic conventions. It represents the - // description of the Status if it has a value, otherwise not set. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'resource not found' - OTelStatusDescriptionKey = attribute.Key("otel.status_description") -) - -var ( - // The operation has been validated by an Application developer or Operator to have completed successfully - OTelStatusCodeOk = OTelStatusCodeKey.String("OK") - // The operation contains an error - OTelStatusCodeError = OTelStatusCodeKey.String("ERROR") -) - -// OTelStatusDescription returns an attribute KeyValue conforming to the -// "otel.status_description" semantic conventions. It represents the -// description of the Status if it has a value, otherwise not set. -func OTelStatusDescription(val string) attribute.KeyValue { - return OTelStatusDescriptionKey.String(val) -} - -// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's -// concepts. -const ( - // OTelScopeNameKey is the attribute Key conforming to the - // "otel.scope.name" semantic conventions. It represents the name of the - // instrumentation scope - (`InstrumentationScope.Name` in OTLP). 
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'io.opentelemetry.contrib.mongodb' - OTelScopeNameKey = attribute.Key("otel.scope.name") - - // OTelScopeVersionKey is the attribute Key conforming to the - // "otel.scope.version" semantic conventions. It represents the version of - // the instrumentation scope - (`InstrumentationScope.Version` in OTLP). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '1.0.0' - OTelScopeVersionKey = attribute.Key("otel.scope.version") -) - -// OTelScopeName returns an attribute KeyValue conforming to the -// "otel.scope.name" semantic conventions. It represents the name of the -// instrumentation scope - (`InstrumentationScope.Name` in OTLP). -func OTelScopeName(val string) attribute.KeyValue { - return OTelScopeNameKey.String(val) -} - -// OTelScopeVersion returns an attribute KeyValue conforming to the -// "otel.scope.version" semantic conventions. It represents the version of the -// instrumentation scope - (`InstrumentationScope.Version` in OTLP). -func OTelScopeVersion(val string) attribute.KeyValue { - return OTelScopeVersionKey.String(val) -} - -// Operations that access some remote service. -const ( - // PeerServiceKey is the attribute Key conforming to the "peer.service" - // semantic conventions. It represents the - // [`service.name`](/docs/resource/README.md#service) of the remote - // service. SHOULD be equal to the actual `service.name` resource attribute - // of the remote service if any. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'AuthTokenCache' - PeerServiceKey = attribute.Key("peer.service") -) - -// PeerService returns an attribute KeyValue conforming to the -// "peer.service" semantic conventions. It represents the -// [`service.name`](/docs/resource/README.md#service) of the remote service. 
-// SHOULD be equal to the actual `service.name` resource attribute of the -// remote service if any. -func PeerService(val string) attribute.KeyValue { - return PeerServiceKey.String(val) -} - -// An operating system process. -const ( - // ProcessCommandKey is the attribute Key conforming to the - // "process.command" semantic conventions. It represents the command used - // to launch the process (i.e. the command name). On Linux based systems, - // can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can - // be set to the first parameter extracted from `GetCommandLineW`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'cmd/otelcol' - ProcessCommandKey = attribute.Key("process.command") - - // ProcessCommandArgsKey is the attribute Key conforming to the - // "process.command_args" semantic conventions. It represents the all the - // command arguments (including the command/executable itself) as received - // by the process. On Linux-based systems (and some other Unixoid systems - // supporting procfs), can be set according to the list of null-delimited - // strings extracted from `proc/[pid]/cmdline`. For libc-based executables, - // this would be the full argv vector passed to `main`. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'cmd/otecol', '--config=config.yaml' - ProcessCommandArgsKey = attribute.Key("process.command_args") - - // ProcessCommandLineKey is the attribute Key conforming to the - // "process.command_line" semantic conventions. It represents the full - // command used to launch the process as a single string representing the - // full command. On Windows, can be set to the result of `GetCommandLineW`. - // Do not set this if you have to assemble it just for monitoring; use - // `process.command_args` instead. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"' - ProcessCommandLineKey = attribute.Key("process.command_line") - - // ProcessContextSwitchTypeKey is the attribute Key conforming to the - // "process.context_switch_type" semantic conventions. It represents the - // specifies whether the context switches for this data point were - // voluntary or involuntary. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - ProcessContextSwitchTypeKey = attribute.Key("process.context_switch_type") - - // ProcessCreationTimeKey is the attribute Key conforming to the - // "process.creation.time" semantic conventions. It represents the date and - // time the process was created, in ISO 8601 format. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2023-11-21T09:25:34.853Z' - ProcessCreationTimeKey = attribute.Key("process.creation.time") - - // ProcessExecutableNameKey is the attribute Key conforming to the - // "process.executable.name" semantic conventions. It represents the name - // of the process executable. On Linux based systems, can be set to the - // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name - // of `GetProcessImageFileNameW`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'otelcol' - ProcessExecutableNameKey = attribute.Key("process.executable.name") - - // ProcessExecutablePathKey is the attribute Key conforming to the - // "process.executable.path" semantic conventions. It represents the full - // path to the process executable. On Linux based systems, can be set to - // the target of `proc/[pid]/exe`. On Windows, can be set to the result of - // `GetProcessImageFileNameW`. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/usr/bin/cmd/otelcol' - ProcessExecutablePathKey = attribute.Key("process.executable.path") - - // ProcessExitCodeKey is the attribute Key conforming to the - // "process.exit.code" semantic conventions. It represents the exit code of - // the process. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 127 - ProcessExitCodeKey = attribute.Key("process.exit.code") - - // ProcessExitTimeKey is the attribute Key conforming to the - // "process.exit.time" semantic conventions. It represents the date and - // time the process exited, in ISO 8601 format. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2023-11-21T09:26:12.315Z' - ProcessExitTimeKey = attribute.Key("process.exit.time") - - // ProcessGroupLeaderPIDKey is the attribute Key conforming to the - // "process.group_leader.pid" semantic conventions. It represents the PID - // of the process's group leader. This is also the process group ID (PGID) - // of the process. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 23 - ProcessGroupLeaderPIDKey = attribute.Key("process.group_leader.pid") - - // ProcessInteractiveKey is the attribute Key conforming to the - // "process.interactive" semantic conventions. It represents the whether - // the process is connected to an interactive shell. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - ProcessInteractiveKey = attribute.Key("process.interactive") - - // ProcessOwnerKey is the attribute Key conforming to the "process.owner" - // semantic conventions. It represents the username of the user that owns - // the process. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'root' - ProcessOwnerKey = attribute.Key("process.owner") - - // ProcessPagingFaultTypeKey is the attribute Key conforming to the - // "process.paging.fault_type" semantic conventions. It represents the type - // of page fault for this data point. Type `major` is for major/hard page - // faults, and `minor` is for minor/soft page faults. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - ProcessPagingFaultTypeKey = attribute.Key("process.paging.fault_type") - - // ProcessParentPIDKey is the attribute Key conforming to the - // "process.parent_pid" semantic conventions. It represents the parent - // Process identifier (PPID). - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 111 - ProcessParentPIDKey = attribute.Key("process.parent_pid") - - // ProcessPIDKey is the attribute Key conforming to the "process.pid" - // semantic conventions. It represents the process identifier (PID). - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1234 - ProcessPIDKey = attribute.Key("process.pid") - - // ProcessRealUserIDKey is the attribute Key conforming to the - // "process.real_user.id" semantic conventions. It represents the real user - // ID (RUID) of the process. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1000 - ProcessRealUserIDKey = attribute.Key("process.real_user.id") - - // ProcessRealUserNameKey is the attribute Key conforming to the - // "process.real_user.name" semantic conventions. It represents the - // username of the real user of the process. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'operator' - ProcessRealUserNameKey = attribute.Key("process.real_user.name") - - // ProcessRuntimeDescriptionKey is the attribute Key conforming to the - // "process.runtime.description" semantic conventions. It represents an - // additional description about the runtime of the process, for example a - // specific vendor customization of the runtime environment. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' - ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") - - // ProcessRuntimeNameKey is the attribute Key conforming to the - // "process.runtime.name" semantic conventions. It represents the name of - // the runtime of this process. For compiled native binaries, this SHOULD - // be the name of the compiler. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'OpenJDK Runtime Environment' - ProcessRuntimeNameKey = attribute.Key("process.runtime.name") - - // ProcessRuntimeVersionKey is the attribute Key conforming to the - // "process.runtime.version" semantic conventions. It represents the - // version of the runtime of this process, as returned by the runtime - // without modification. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '14.0.2' - ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") - - // ProcessSavedUserIDKey is the attribute Key conforming to the - // "process.saved_user.id" semantic conventions. It represents the saved - // user ID (SUID) of the process. 
- // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1002 - ProcessSavedUserIDKey = attribute.Key("process.saved_user.id") - - // ProcessSavedUserNameKey is the attribute Key conforming to the - // "process.saved_user.name" semantic conventions. It represents the - // username of the saved user. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'operator' - ProcessSavedUserNameKey = attribute.Key("process.saved_user.name") - - // ProcessSessionLeaderPIDKey is the attribute Key conforming to the - // "process.session_leader.pid" semantic conventions. It represents the PID - // of the process's session leader. This is also the session ID (SID) of - // the process. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 14 - ProcessSessionLeaderPIDKey = attribute.Key("process.session_leader.pid") - - // ProcessUserIDKey is the attribute Key conforming to the - // "process.user.id" semantic conventions. It represents the effective user - // ID (EUID) of the process. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1001 - ProcessUserIDKey = attribute.Key("process.user.id") - - // ProcessUserNameKey is the attribute Key conforming to the - // "process.user.name" semantic conventions. It represents the username of - // the effective user of the process. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'root' - ProcessUserNameKey = attribute.Key("process.user.name") - - // ProcessVpidKey is the attribute Key conforming to the "process.vpid" - // semantic conventions. It represents the virtual process identifier. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 12 - // Note: The process ID within a PID namespace. 
This is not necessarily - // unique across all processes on the host but it is unique within the - // process namespace that the process exists within. - ProcessVpidKey = attribute.Key("process.vpid") -) - -var ( - // voluntary - ProcessContextSwitchTypeVoluntary = ProcessContextSwitchTypeKey.String("voluntary") - // involuntary - ProcessContextSwitchTypeInvoluntary = ProcessContextSwitchTypeKey.String("involuntary") -) - -var ( - // major - ProcessPagingFaultTypeMajor = ProcessPagingFaultTypeKey.String("major") - // minor - ProcessPagingFaultTypeMinor = ProcessPagingFaultTypeKey.String("minor") -) - -// ProcessCommand returns an attribute KeyValue conforming to the -// "process.command" semantic conventions. It represents the command used to -// launch the process (i.e. the command name). On Linux based systems, can be -// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to -// the first parameter extracted from `GetCommandLineW`. -func ProcessCommand(val string) attribute.KeyValue { - return ProcessCommandKey.String(val) -} - -// ProcessCommandArgs returns an attribute KeyValue conforming to the -// "process.command_args" semantic conventions. It represents the all the -// command arguments (including the command/executable itself) as received by -// the process. On Linux-based systems (and some other Unixoid systems -// supporting procfs), can be set according to the list of null-delimited -// strings extracted from `proc/[pid]/cmdline`. For libc-based executables, -// this would be the full argv vector passed to `main`. -func ProcessCommandArgs(val ...string) attribute.KeyValue { - return ProcessCommandArgsKey.StringSlice(val) -} - -// ProcessCommandLine returns an attribute KeyValue conforming to the -// "process.command_line" semantic conventions. It represents the full command -// used to launch the process as a single string representing the full command. -// On Windows, can be set to the result of `GetCommandLineW`. 
Do not set this -// if you have to assemble it just for monitoring; use `process.command_args` -// instead. -func ProcessCommandLine(val string) attribute.KeyValue { - return ProcessCommandLineKey.String(val) -} - -// ProcessCreationTime returns an attribute KeyValue conforming to the -// "process.creation.time" semantic conventions. It represents the date and -// time the process was created, in ISO 8601 format. -func ProcessCreationTime(val string) attribute.KeyValue { - return ProcessCreationTimeKey.String(val) -} - -// ProcessExecutableName returns an attribute KeyValue conforming to the -// "process.executable.name" semantic conventions. It represents the name of -// the process executable. On Linux based systems, can be set to the `Name` in -// `proc/[pid]/status`. On Windows, can be set to the base name of -// `GetProcessImageFileNameW`. -func ProcessExecutableName(val string) attribute.KeyValue { - return ProcessExecutableNameKey.String(val) -} - -// ProcessExecutablePath returns an attribute KeyValue conforming to the -// "process.executable.path" semantic conventions. It represents the full path -// to the process executable. On Linux based systems, can be set to the target -// of `proc/[pid]/exe`. On Windows, can be set to the result of -// `GetProcessImageFileNameW`. -func ProcessExecutablePath(val string) attribute.KeyValue { - return ProcessExecutablePathKey.String(val) -} - -// ProcessExitCode returns an attribute KeyValue conforming to the -// "process.exit.code" semantic conventions. It represents the exit code of the -// process. -func ProcessExitCode(val int) attribute.KeyValue { - return ProcessExitCodeKey.Int(val) -} - -// ProcessExitTime returns an attribute KeyValue conforming to the -// "process.exit.time" semantic conventions. It represents the date and time -// the process exited, in ISO 8601 format. 
-func ProcessExitTime(val string) attribute.KeyValue { - return ProcessExitTimeKey.String(val) -} - -// ProcessGroupLeaderPID returns an attribute KeyValue conforming to the -// "process.group_leader.pid" semantic conventions. It represents the PID of -// the process's group leader. This is also the process group ID (PGID) of the -// process. -func ProcessGroupLeaderPID(val int) attribute.KeyValue { - return ProcessGroupLeaderPIDKey.Int(val) -} - -// ProcessInteractive returns an attribute KeyValue conforming to the -// "process.interactive" semantic conventions. It represents the whether the -// process is connected to an interactive shell. -func ProcessInteractive(val bool) attribute.KeyValue { - return ProcessInteractiveKey.Bool(val) -} - -// ProcessOwner returns an attribute KeyValue conforming to the -// "process.owner" semantic conventions. It represents the username of the user -// that owns the process. -func ProcessOwner(val string) attribute.KeyValue { - return ProcessOwnerKey.String(val) -} - -// ProcessParentPID returns an attribute KeyValue conforming to the -// "process.parent_pid" semantic conventions. It represents the parent Process -// identifier (PPID). -func ProcessParentPID(val int) attribute.KeyValue { - return ProcessParentPIDKey.Int(val) -} - -// ProcessPID returns an attribute KeyValue conforming to the "process.pid" -// semantic conventions. It represents the process identifier (PID). -func ProcessPID(val int) attribute.KeyValue { - return ProcessPIDKey.Int(val) -} - -// ProcessRealUserID returns an attribute KeyValue conforming to the -// "process.real_user.id" semantic conventions. It represents the real user ID -// (RUID) of the process. -func ProcessRealUserID(val int) attribute.KeyValue { - return ProcessRealUserIDKey.Int(val) -} - -// ProcessRealUserName returns an attribute KeyValue conforming to the -// "process.real_user.name" semantic conventions. It represents the username of -// the real user of the process. 
-func ProcessRealUserName(val string) attribute.KeyValue { - return ProcessRealUserNameKey.String(val) -} - -// ProcessRuntimeDescription returns an attribute KeyValue conforming to the -// "process.runtime.description" semantic conventions. It represents an -// additional description about the runtime of the process, for example a -// specific vendor customization of the runtime environment. -func ProcessRuntimeDescription(val string) attribute.KeyValue { - return ProcessRuntimeDescriptionKey.String(val) -} - -// ProcessRuntimeName returns an attribute KeyValue conforming to the -// "process.runtime.name" semantic conventions. It represents the name of the -// runtime of this process. For compiled native binaries, this SHOULD be the -// name of the compiler. -func ProcessRuntimeName(val string) attribute.KeyValue { - return ProcessRuntimeNameKey.String(val) -} - -// ProcessRuntimeVersion returns an attribute KeyValue conforming to the -// "process.runtime.version" semantic conventions. It represents the version of -// the runtime of this process, as returned by the runtime without -// modification. -func ProcessRuntimeVersion(val string) attribute.KeyValue { - return ProcessRuntimeVersionKey.String(val) -} - -// ProcessSavedUserID returns an attribute KeyValue conforming to the -// "process.saved_user.id" semantic conventions. It represents the saved user -// ID (SUID) of the process. -func ProcessSavedUserID(val int) attribute.KeyValue { - return ProcessSavedUserIDKey.Int(val) -} - -// ProcessSavedUserName returns an attribute KeyValue conforming to the -// "process.saved_user.name" semantic conventions. It represents the username -// of the saved user. -func ProcessSavedUserName(val string) attribute.KeyValue { - return ProcessSavedUserNameKey.String(val) -} - -// ProcessSessionLeaderPID returns an attribute KeyValue conforming to the -// "process.session_leader.pid" semantic conventions. It represents the PID of -// the process's session leader. 
This is also the session ID (SID) of the -// process. -func ProcessSessionLeaderPID(val int) attribute.KeyValue { - return ProcessSessionLeaderPIDKey.Int(val) -} - -// ProcessUserID returns an attribute KeyValue conforming to the -// "process.user.id" semantic conventions. It represents the effective user ID -// (EUID) of the process. -func ProcessUserID(val int) attribute.KeyValue { - return ProcessUserIDKey.Int(val) -} - -// ProcessUserName returns an attribute KeyValue conforming to the -// "process.user.name" semantic conventions. It represents the username of the -// effective user of the process. -func ProcessUserName(val string) attribute.KeyValue { - return ProcessUserNameKey.String(val) -} - -// ProcessVpid returns an attribute KeyValue conforming to the -// "process.vpid" semantic conventions. It represents the virtual process -// identifier. -func ProcessVpid(val int) attribute.KeyValue { - return ProcessVpidKey.Int(val) -} - -// Attributes for process CPU -const ( - // ProcessCPUStateKey is the attribute Key conforming to the - // "process.cpu.state" semantic conventions. It represents the CPU state of - // the process. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - ProcessCPUStateKey = attribute.Key("process.cpu.state") -) - -var ( - // system - ProcessCPUStateSystem = ProcessCPUStateKey.String("system") - // user - ProcessCPUStateUser = ProcessCPUStateKey.String("user") - // wait - ProcessCPUStateWait = ProcessCPUStateKey.String("wait") -) - -// Attributes for remote procedure calls. -const ( - // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the - // "rpc.connect_rpc.error_code" semantic conventions. It represents the - // [error codes](https://connect.build/docs/protocol/#error-codes) of the - // Connect request. Error codes are always string values. 
- // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code") - - // RPCGRPCStatusCodeKey is the attribute Key conforming to the - // "rpc.grpc.status_code" semantic conventions. It represents the [numeric - // status - // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of - // the gRPC request. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") - - // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the - // "rpc.jsonrpc.error_code" semantic conventions. It represents the - // `error.code` property of response if it is an error response. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: -32700, 100 - RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") - - // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the - // "rpc.jsonrpc.error_message" semantic conventions. It represents the - // `error.message` property of response if it is an error response. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Parse error', 'User already exists' - RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") - - // RPCJsonrpcRequestIDKey is the attribute Key conforming to the - // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` - // property of request or response. Since protocol allows id to be int, - // string, `null` or missing (for notifications), value is expected to be - // cast to string for simplicity. Use empty string in case of `null` value. - // Omit entirely if this is a notification. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '10', 'request-7', '' - RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") - - // RPCJsonrpcVersionKey is the attribute Key conforming to the - // "rpc.jsonrpc.version" semantic conventions. It represents the protocol - // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 - // doesn't specify this, the value can be omitted. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2.0', '1.0' - RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") - - // RPCMessageCompressedSizeKey is the attribute Key conforming to the - // "rpc.message.compressed_size" semantic conventions. It represents the - // compressed size of the message in bytes. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - RPCMessageCompressedSizeKey = attribute.Key("rpc.message.compressed_size") - - // RPCMessageIDKey is the attribute Key conforming to the "rpc.message.id" - // semantic conventions. It represents the mUST be calculated as two - // different counters starting from `1` one for sent messages and one for - // received message. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Note: This way we guarantee that the values will be consistent between - // different implementations. - RPCMessageIDKey = attribute.Key("rpc.message.id") - - // RPCMessageTypeKey is the attribute Key conforming to the - // "rpc.message.type" semantic conventions. It represents the whether this - // is a received or sent message. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - RPCMessageTypeKey = attribute.Key("rpc.message.type") - - // RPCMessageUncompressedSizeKey is the attribute Key conforming to the - // "rpc.message.uncompressed_size" semantic conventions. It represents the - // uncompressed size of the message in bytes. 
- // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - RPCMessageUncompressedSizeKey = attribute.Key("rpc.message.uncompressed_size") - - // RPCMethodKey is the attribute Key conforming to the "rpc.method" - // semantic conventions. It represents the name of the (logical) method - // being called, must be equal to the $method part in the span name. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'exampleMethod' - // Note: This is the logical name of the method from the RPC interface - // perspective, which can be different from the name of any implementing - // method/function. The `code.function` attribute may be used to store the - // latter (e.g., method actually executing the call on the server side, RPC - // client stub method on the client side). - RPCMethodKey = attribute.Key("rpc.method") - - // RPCServiceKey is the attribute Key conforming to the "rpc.service" - // semantic conventions. It represents the full (logical) name of the - // service being called, including its package name, if applicable. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myservice.EchoService' - // Note: This is the logical name of the service from the RPC interface - // perspective, which can be different from the name of any implementing - // class. The `code.namespace` attribute may be used to store the latter - // (despite the attribute name, it may include a class name; e.g., class - // with method actually executing the call on the server side, RPC client - // stub class on the client side). - RPCServiceKey = attribute.Key("rpc.service") - - // RPCSystemKey is the attribute Key conforming to the "rpc.system" - // semantic conventions. It represents a string identifying the remoting - // system. See below for a list of well-known identifiers. 
- // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - RPCSystemKey = attribute.Key("rpc.system") -) - -var ( - // cancelled - RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled") - // unknown - RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown") - // invalid_argument - RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument") - // deadline_exceeded - RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded") - // not_found - RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found") - // already_exists - RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists") - // permission_denied - RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied") - // resource_exhausted - RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted") - // failed_precondition - RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition") - // aborted - RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted") - // out_of_range - RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range") - // unimplemented - RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented") - // internal - RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal") - // unavailable - RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable") - // data_loss - RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss") - // unauthenticated - RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated") -) - -var ( - // OK - RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) - // CANCELLED - RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) - 
// UNKNOWN - RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) - // INVALID_ARGUMENT - RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) - // DEADLINE_EXCEEDED - RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) - // NOT_FOUND - RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) - // ALREADY_EXISTS - RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) - // PERMISSION_DENIED - RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) - // RESOURCE_EXHAUSTED - RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) - // FAILED_PRECONDITION - RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) - // ABORTED - RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) - // OUT_OF_RANGE - RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) - // UNIMPLEMENTED - RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) - // INTERNAL - RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) - // UNAVAILABLE - RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) - // DATA_LOSS - RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) - // UNAUTHENTICATED - RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) -) - -var ( - // sent - RPCMessageTypeSent = RPCMessageTypeKey.String("SENT") - // received - RPCMessageTypeReceived = RPCMessageTypeKey.String("RECEIVED") -) - -var ( - // gRPC - RPCSystemGRPC = RPCSystemKey.String("grpc") - // Java RMI - RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") - // .NET WCF - RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf") - // Apache Dubbo - RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo") - // Connect RPC - RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc") -) - -// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the -// "rpc.jsonrpc.error_code" semantic conventions. It represents the -// `error.code` property of response if it is an error response. 
-func RPCJsonrpcErrorCode(val int) attribute.KeyValue { - return RPCJsonrpcErrorCodeKey.Int(val) -} - -// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the -// "rpc.jsonrpc.error_message" semantic conventions. It represents the -// `error.message` property of response if it is an error response. -func RPCJsonrpcErrorMessage(val string) attribute.KeyValue { - return RPCJsonrpcErrorMessageKey.String(val) -} - -// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the -// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` -// property of request or response. Since protocol allows id to be int, string, -// `null` or missing (for notifications), value is expected to be cast to -// string for simplicity. Use empty string in case of `null` value. Omit -// entirely if this is a notification. -func RPCJsonrpcRequestID(val string) attribute.KeyValue { - return RPCJsonrpcRequestIDKey.String(val) -} - -// RPCJsonrpcVersion returns an attribute KeyValue conforming to the -// "rpc.jsonrpc.version" semantic conventions. It represents the protocol -// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 -// doesn't specify this, the value can be omitted. -func RPCJsonrpcVersion(val string) attribute.KeyValue { - return RPCJsonrpcVersionKey.String(val) -} - -// RPCMessageCompressedSize returns an attribute KeyValue conforming to the -// "rpc.message.compressed_size" semantic conventions. It represents the -// compressed size of the message in bytes. -func RPCMessageCompressedSize(val int) attribute.KeyValue { - return RPCMessageCompressedSizeKey.Int(val) -} - -// RPCMessageID returns an attribute KeyValue conforming to the -// "rpc.message.id" semantic conventions. It represents the mUST be calculated -// as two different counters starting from `1` one for sent messages and one -// for received message. 
-func RPCMessageID(val int) attribute.KeyValue { - return RPCMessageIDKey.Int(val) -} - -// RPCMessageUncompressedSize returns an attribute KeyValue conforming to -// the "rpc.message.uncompressed_size" semantic conventions. It represents the -// uncompressed size of the message in bytes. -func RPCMessageUncompressedSize(val int) attribute.KeyValue { - return RPCMessageUncompressedSizeKey.Int(val) -} - -// RPCMethod returns an attribute KeyValue conforming to the "rpc.method" -// semantic conventions. It represents the name of the (logical) method being -// called, must be equal to the $method part in the span name. -func RPCMethod(val string) attribute.KeyValue { - return RPCMethodKey.String(val) -} - -// RPCService returns an attribute KeyValue conforming to the "rpc.service" -// semantic conventions. It represents the full (logical) name of the service -// being called, including its package name, if applicable. -func RPCService(val string) attribute.KeyValue { - return RPCServiceKey.String(val) -} - -// These attributes may be used to describe the server in a connection-based -// network interaction where there is one side that initiates the connection -// (the client is the side that initiates the connection). This covers all TCP -// network interactions since TCP is connection-based and one side initiates -// the connection (an exception is made for peer-to-peer communication over TCP -// where the "user-facing" surface of the protocol / API doesn't expose a clear -// notion of client and server). This also covers UDP network interactions -// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS. -const ( - // ServerAddressKey is the attribute Key conforming to the "server.address" - // semantic conventions. It represents the server domain name if available - // without reverse DNS lookup; otherwise, IP address or Unix domain socket - // name. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'example.com', '10.1.2.80', '/tmp/my.sock' - // Note: When observed from the client side, and when communicating through - // an intermediary, `server.address` SHOULD represent the server address - // behind any intermediaries, for example proxies, if it's available. - ServerAddressKey = attribute.Key("server.address") - - // ServerPortKey is the attribute Key conforming to the "server.port" - // semantic conventions. It represents the server port number. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 80, 8080, 443 - // Note: When observed from the client side, and when communicating through - // an intermediary, `server.port` SHOULD represent the server port behind - // any intermediaries, for example proxies, if it's available. - ServerPortKey = attribute.Key("server.port") -) - -// ServerAddress returns an attribute KeyValue conforming to the -// "server.address" semantic conventions. It represents the server domain name -// if available without reverse DNS lookup; otherwise, IP address or Unix -// domain socket name. -func ServerAddress(val string) attribute.KeyValue { - return ServerAddressKey.String(val) -} - -// ServerPort returns an attribute KeyValue conforming to the "server.port" -// semantic conventions. It represents the server port number. -func ServerPort(val int) attribute.KeyValue { - return ServerPortKey.Int(val) -} - -// A service instance. -const ( - // ServiceInstanceIDKey is the attribute Key conforming to the - // "service.instance.id" semantic conventions. It represents the string ID - // of the service instance. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '627cc493-f310-47de-96bd-71410b7dec09' - // Note: MUST be unique for each instance of the same - // `service.namespace,service.name` pair (in other words - // `service.namespace,service.name,service.instance.id` triplet MUST be - // globally unique). The ID helps to - // distinguish instances of the same service that exist at the same time - // (e.g. instances of a horizontally scaled - // service). - // - // Implementations, such as SDKs, are recommended to generate a random - // Version 1 or Version 4 [RFC - // 4122](https://www.ietf.org/rfc/rfc4122.txt) UUID, but are free to use an - // inherent unique ID as the source of - // this value if stability is desirable. In that case, the ID SHOULD be - // used as source of a UUID Version 5 and - // SHOULD use the following UUID as the namespace: - // `4d63009a-8d0f-11ee-aad7-4c796ed8e320`. - // - // UUIDs are typically recommended, as only an opaque value for the - // purposes of identifying a service instance is - // needed. Similar to what can be seen in the man page for the - // [`/etc/machine-id`](https://www.freedesktop.org/software/systemd/man/machine-id.html) - // file, the underlying - // data, such as pod name and namespace should be treated as confidential, - // being the user's choice to expose it - // or not via another resource attribute. - // - // For applications running behind an application server (like unicorn), we - // do not recommend using one identifier - // for all processes participating in the application. Instead, it's - // recommended each division (e.g. a worker - // thread in unicorn) to have its own instance.id. - // - // It's not recommended for a Collector to set `service.instance.id` if it - // can't unambiguously determine the - // service instance that is generating that telemetry. 
For instance, - // creating an UUID based on `pod.name` will - // likely be wrong, as the Collector might not know from which container - // within that pod the telemetry originated. - // However, Collectors can set the `service.instance.id` if they can - // unambiguously determine the service instance - // for that telemetry. This is typically the case for scraping receivers, - // as they know the target address and - // port. - ServiceInstanceIDKey = attribute.Key("service.instance.id") - - // ServiceNameKey is the attribute Key conforming to the "service.name" - // semantic conventions. It represents the logical name of the service. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'shoppingcart' - // Note: MUST be the same for all instances of horizontally scaled - // services. If the value was not specified, SDKs MUST fallback to - // `unknown_service:` concatenated with - // [`process.executable.name`](process.md), e.g. `unknown_service:bash`. If - // `process.executable.name` is not available, the value MUST be set to - // `unknown_service`. - ServiceNameKey = attribute.Key("service.name") - - // ServiceNamespaceKey is the attribute Key conforming to the - // "service.namespace" semantic conventions. It represents a namespace for - // `service.name`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Shop' - // Note: A string value having a meaning that helps to distinguish a group - // of services, for example the team name that owns a group of services. - // `service.name` is expected to be unique within the same namespace. If - // `service.namespace` is not specified in the Resource then `service.name` - // is expected to be unique for all services that have no explicit - // namespace defined (so the empty/unspecified namespace is simply one more - // valid namespace). Zero-length namespace string is assumed equal to - // unspecified namespace. 
- ServiceNamespaceKey = attribute.Key("service.namespace") - - // ServiceVersionKey is the attribute Key conforming to the - // "service.version" semantic conventions. It represents the version string - // of the service API or implementation. The format is not defined by these - // conventions. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '2.0.0', 'a01dbef8a' - ServiceVersionKey = attribute.Key("service.version") -) - -// ServiceInstanceID returns an attribute KeyValue conforming to the -// "service.instance.id" semantic conventions. It represents the string ID of -// the service instance. -func ServiceInstanceID(val string) attribute.KeyValue { - return ServiceInstanceIDKey.String(val) -} - -// ServiceName returns an attribute KeyValue conforming to the -// "service.name" semantic conventions. It represents the logical name of the -// service. -func ServiceName(val string) attribute.KeyValue { - return ServiceNameKey.String(val) -} - -// ServiceNamespace returns an attribute KeyValue conforming to the -// "service.namespace" semantic conventions. It represents a namespace for -// `service.name`. -func ServiceNamespace(val string) attribute.KeyValue { - return ServiceNamespaceKey.String(val) -} - -// ServiceVersion returns an attribute KeyValue conforming to the -// "service.version" semantic conventions. It represents the version string of -// the service API or implementation. The format is not defined by these -// conventions. -func ServiceVersion(val string) attribute.KeyValue { - return ServiceVersionKey.String(val) -} - -// Session is defined as the period of time encompassing all activities -// performed by the application and the actions executed by the end user. -// Consequently, a Session is represented as a collection of Logs, Events, and -// Spans emitted by the Client Application throughout the Session's duration. 
-// Each Session is assigned a unique identifier, which is included as an -// attribute in the Logs, Events, and Spans generated during the Session's -// lifecycle. -// When a session reaches end of life, typically due to user inactivity or -// session timeout, a new session identifier will be assigned. The previous -// session identifier may be provided by the instrumentation so that telemetry -// backends can link the two sessions. -const ( - // SessionIDKey is the attribute Key conforming to the "session.id" - // semantic conventions. It represents a unique id to identify a session. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '00112233-4455-6677-8899-aabbccddeeff' - SessionIDKey = attribute.Key("session.id") - - // SessionPreviousIDKey is the attribute Key conforming to the - // "session.previous_id" semantic conventions. It represents the previous - // `session.id` for this user, when known. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '00112233-4455-6677-8899-aabbccddeeff' - SessionPreviousIDKey = attribute.Key("session.previous_id") -) - -// SessionID returns an attribute KeyValue conforming to the "session.id" -// semantic conventions. It represents a unique id to identify a session. -func SessionID(val string) attribute.KeyValue { - return SessionIDKey.String(val) -} - -// SessionPreviousID returns an attribute KeyValue conforming to the -// "session.previous_id" semantic conventions. It represents the previous -// `session.id` for this user, when known. -func SessionPreviousID(val string) attribute.KeyValue { - return SessionPreviousIDKey.String(val) -} - -// SignalR attributes -const ( - // SignalrConnectionStatusKey is the attribute Key conforming to the - // "signalr.connection.status" semantic conventions. It represents the - // signalR HTTP connection closure status. 
- // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'app_shutdown', 'timeout' - SignalrConnectionStatusKey = attribute.Key("signalr.connection.status") - - // SignalrTransportKey is the attribute Key conforming to the - // "signalr.transport" semantic conventions. It represents the [SignalR - // transport - // type](https://github.com/dotnet/aspnetcore/blob/main/src/SignalR/docs/specs/TransportProtocols.md) - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'web_sockets', 'long_polling' - SignalrTransportKey = attribute.Key("signalr.transport") -) - -var ( - // The connection was closed normally - SignalrConnectionStatusNormalClosure = SignalrConnectionStatusKey.String("normal_closure") - // The connection was closed due to a timeout - SignalrConnectionStatusTimeout = SignalrConnectionStatusKey.String("timeout") - // The connection was closed because the app is shutting down - SignalrConnectionStatusAppShutdown = SignalrConnectionStatusKey.String("app_shutdown") -) - -var ( - // ServerSentEvents protocol - SignalrTransportServerSentEvents = SignalrTransportKey.String("server_sent_events") - // LongPolling protocol - SignalrTransportLongPolling = SignalrTransportKey.String("long_polling") - // WebSockets protocol - SignalrTransportWebSockets = SignalrTransportKey.String("web_sockets") -) - -// These attributes may be used to describe the sender of a network -// exchange/packet. These should be used when there is no client/server -// relationship between the two sides, or when that relationship is unknown. -// This covers low-level network interactions (e.g. packet tracing) where you -// don't know if there was a connection or which side initiated it. This also -// covers unidirectional UDP flows and peer-to-peer communication where the -// "user-facing" surface of the protocol / API doesn't expose a clear notion of -// client and server. 
-const ( - // SourceAddressKey is the attribute Key conforming to the "source.address" - // semantic conventions. It represents the source address - domain name if - // available without reverse DNS lookup; otherwise, IP address or Unix - // domain socket name. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'source.example.com', '10.1.2.80', '/tmp/my.sock' - // Note: When observed from the destination side, and when communicating - // through an intermediary, `source.address` SHOULD represent the source - // address behind any intermediaries, for example proxies, if it's - // available. - SourceAddressKey = attribute.Key("source.address") - - // SourcePortKey is the attribute Key conforming to the "source.port" - // semantic conventions. It represents the source port number - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 3389, 2888 - SourcePortKey = attribute.Key("source.port") -) - -// SourceAddress returns an attribute KeyValue conforming to the -// "source.address" semantic conventions. It represents the source address - -// domain name if available without reverse DNS lookup; otherwise, IP address -// or Unix domain socket name. -func SourceAddress(val string) attribute.KeyValue { - return SourceAddressKey.String(val) -} - -// SourcePort returns an attribute KeyValue conforming to the "source.port" -// semantic conventions. It represents the source port number -func SourcePort(val int) attribute.KeyValue { - return SourcePortKey.Int(val) -} - -// Describes System attributes -const ( - // SystemDeviceKey is the attribute Key conforming to the "system.device" - // semantic conventions. 
It represents the device identifier - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '(identifier)' - SystemDeviceKey = attribute.Key("system.device") -) - -// SystemDevice returns an attribute KeyValue conforming to the -// "system.device" semantic conventions. It represents the device identifier -func SystemDevice(val string) attribute.KeyValue { - return SystemDeviceKey.String(val) -} - -// Describes System CPU attributes -const ( - // SystemCPULogicalNumberKey is the attribute Key conforming to the - // "system.cpu.logical_number" semantic conventions. It represents the - // logical CPU number [0..n-1] - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1 - SystemCPULogicalNumberKey = attribute.Key("system.cpu.logical_number") - - // SystemCPUStateKey is the attribute Key conforming to the - // "system.cpu.state" semantic conventions. It represents the state of the - // CPU - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'idle', 'interrupt' - SystemCPUStateKey = attribute.Key("system.cpu.state") -) - -var ( - // user - SystemCPUStateUser = SystemCPUStateKey.String("user") - // system - SystemCPUStateSystem = SystemCPUStateKey.String("system") - // nice - SystemCPUStateNice = SystemCPUStateKey.String("nice") - // idle - SystemCPUStateIdle = SystemCPUStateKey.String("idle") - // iowait - SystemCPUStateIowait = SystemCPUStateKey.String("iowait") - // interrupt - SystemCPUStateInterrupt = SystemCPUStateKey.String("interrupt") - // steal - SystemCPUStateSteal = SystemCPUStateKey.String("steal") -) - -// SystemCPULogicalNumber returns an attribute KeyValue conforming to the -// "system.cpu.logical_number" semantic conventions. 
It represents the logical -// CPU number [0..n-1] -func SystemCPULogicalNumber(val int) attribute.KeyValue { - return SystemCPULogicalNumberKey.Int(val) -} - -// Describes System Memory attributes -const ( - // SystemMemoryStateKey is the attribute Key conforming to the - // "system.memory.state" semantic conventions. It represents the memory - // state - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'free', 'cached' - SystemMemoryStateKey = attribute.Key("system.memory.state") -) - -var ( - // used - SystemMemoryStateUsed = SystemMemoryStateKey.String("used") - // free - SystemMemoryStateFree = SystemMemoryStateKey.String("free") - // shared - SystemMemoryStateShared = SystemMemoryStateKey.String("shared") - // buffers - SystemMemoryStateBuffers = SystemMemoryStateKey.String("buffers") - // cached - SystemMemoryStateCached = SystemMemoryStateKey.String("cached") -) - -// Describes System Memory Paging attributes -const ( - // SystemPagingDirectionKey is the attribute Key conforming to the - // "system.paging.direction" semantic conventions. It represents the paging - // access direction - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'in' - SystemPagingDirectionKey = attribute.Key("system.paging.direction") - - // SystemPagingStateKey is the attribute Key conforming to the - // "system.paging.state" semantic conventions. It represents the memory - // paging state - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'free' - SystemPagingStateKey = attribute.Key("system.paging.state") - - // SystemPagingTypeKey is the attribute Key conforming to the - // "system.paging.type" semantic conventions. 
It represents the memory - // paging type - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'minor' - SystemPagingTypeKey = attribute.Key("system.paging.type") -) - -var ( - // in - SystemPagingDirectionIn = SystemPagingDirectionKey.String("in") - // out - SystemPagingDirectionOut = SystemPagingDirectionKey.String("out") -) - -var ( - // used - SystemPagingStateUsed = SystemPagingStateKey.String("used") - // free - SystemPagingStateFree = SystemPagingStateKey.String("free") -) - -var ( - // major - SystemPagingTypeMajor = SystemPagingTypeKey.String("major") - // minor - SystemPagingTypeMinor = SystemPagingTypeKey.String("minor") -) - -// Describes Filesystem attributes -const ( - // SystemFilesystemModeKey is the attribute Key conforming to the - // "system.filesystem.mode" semantic conventions. It represents the - // filesystem mode - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'rw, ro' - SystemFilesystemModeKey = attribute.Key("system.filesystem.mode") - - // SystemFilesystemMountpointKey is the attribute Key conforming to the - // "system.filesystem.mountpoint" semantic conventions. It represents the - // filesystem mount path - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/mnt/data' - SystemFilesystemMountpointKey = attribute.Key("system.filesystem.mountpoint") - - // SystemFilesystemStateKey is the attribute Key conforming to the - // "system.filesystem.state" semantic conventions. It represents the - // filesystem state - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'used' - SystemFilesystemStateKey = attribute.Key("system.filesystem.state") - - // SystemFilesystemTypeKey is the attribute Key conforming to the - // "system.filesystem.type" semantic conventions. 
It represents the - // filesystem type - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'ext4' - SystemFilesystemTypeKey = attribute.Key("system.filesystem.type") -) - -var ( - // used - SystemFilesystemStateUsed = SystemFilesystemStateKey.String("used") - // free - SystemFilesystemStateFree = SystemFilesystemStateKey.String("free") - // reserved - SystemFilesystemStateReserved = SystemFilesystemStateKey.String("reserved") -) - -var ( - // fat32 - SystemFilesystemTypeFat32 = SystemFilesystemTypeKey.String("fat32") - // exfat - SystemFilesystemTypeExfat = SystemFilesystemTypeKey.String("exfat") - // ntfs - SystemFilesystemTypeNtfs = SystemFilesystemTypeKey.String("ntfs") - // refs - SystemFilesystemTypeRefs = SystemFilesystemTypeKey.String("refs") - // hfsplus - SystemFilesystemTypeHfsplus = SystemFilesystemTypeKey.String("hfsplus") - // ext4 - SystemFilesystemTypeExt4 = SystemFilesystemTypeKey.String("ext4") -) - -// SystemFilesystemMode returns an attribute KeyValue conforming to the -// "system.filesystem.mode" semantic conventions. It represents the filesystem -// mode -func SystemFilesystemMode(val string) attribute.KeyValue { - return SystemFilesystemModeKey.String(val) -} - -// SystemFilesystemMountpoint returns an attribute KeyValue conforming to -// the "system.filesystem.mountpoint" semantic conventions. It represents the -// filesystem mount path -func SystemFilesystemMountpoint(val string) attribute.KeyValue { - return SystemFilesystemMountpointKey.String(val) -} - -// Describes Network attributes -const ( - // SystemNetworkStateKey is the attribute Key conforming to the - // "system.network.state" semantic conventions. 
It represents a stateless - // protocol MUST NOT set this attribute - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'close_wait' - SystemNetworkStateKey = attribute.Key("system.network.state") -) - -var ( - // close - SystemNetworkStateClose = SystemNetworkStateKey.String("close") - // close_wait - SystemNetworkStateCloseWait = SystemNetworkStateKey.String("close_wait") - // closing - SystemNetworkStateClosing = SystemNetworkStateKey.String("closing") - // delete - SystemNetworkStateDelete = SystemNetworkStateKey.String("delete") - // established - SystemNetworkStateEstablished = SystemNetworkStateKey.String("established") - // fin_wait_1 - SystemNetworkStateFinWait1 = SystemNetworkStateKey.String("fin_wait_1") - // fin_wait_2 - SystemNetworkStateFinWait2 = SystemNetworkStateKey.String("fin_wait_2") - // last_ack - SystemNetworkStateLastAck = SystemNetworkStateKey.String("last_ack") - // listen - SystemNetworkStateListen = SystemNetworkStateKey.String("listen") - // syn_recv - SystemNetworkStateSynRecv = SystemNetworkStateKey.String("syn_recv") - // syn_sent - SystemNetworkStateSynSent = SystemNetworkStateKey.String("syn_sent") - // time_wait - SystemNetworkStateTimeWait = SystemNetworkStateKey.String("time_wait") -) - -// Describes System Process attributes -const ( - // SystemProcessStatusKey is the attribute Key conforming to the - // "system.process.status" semantic conventions. 
It represents the process - // state, e.g., [Linux Process State - // Codes](https://man7.org/linux/man-pages/man1/ps.1.html#PROCESS_STATE_CODES) - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'running' - SystemProcessStatusKey = attribute.Key("system.process.status") -) - -var ( - // running - SystemProcessStatusRunning = SystemProcessStatusKey.String("running") - // sleeping - SystemProcessStatusSleeping = SystemProcessStatusKey.String("sleeping") - // stopped - SystemProcessStatusStopped = SystemProcessStatusKey.String("stopped") - // defunct - SystemProcessStatusDefunct = SystemProcessStatusKey.String("defunct") -) - -// Attributes for telemetry SDK. -const ( - // TelemetrySDKLanguageKey is the attribute Key conforming to the - // "telemetry.sdk.language" semantic conventions. It represents the - // language of the telemetry SDK. - // - // Type: Enum - // RequirementLevel: Required - // Stability: stable - TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") - - // TelemetrySDKNameKey is the attribute Key conforming to the - // "telemetry.sdk.name" semantic conventions. It represents the name of the - // telemetry SDK as defined above. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'opentelemetry' - // Note: The OpenTelemetry SDK MUST set the `telemetry.sdk.name` attribute - // to `opentelemetry`. - // If another SDK, like a fork or a vendor-provided implementation, is - // used, this SDK MUST set the - // `telemetry.sdk.name` attribute to the fully-qualified class or module - // name of this SDK's main entry point - // or another suitable identifier depending on the language. - // The identifier `opentelemetry` is reserved and MUST NOT be used in this - // case. - // All custom identifiers SHOULD be stable across different versions of an - // implementation. 
- TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") - - // TelemetrySDKVersionKey is the attribute Key conforming to the - // "telemetry.sdk.version" semantic conventions. It represents the version - // string of the telemetry SDK. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: '1.2.3' - TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") - - // TelemetryDistroNameKey is the attribute Key conforming to the - // "telemetry.distro.name" semantic conventions. It represents the name of - // the auto instrumentation agent or distribution, if used. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'parts-unlimited-java' - // Note: Official auto instrumentation agents and distributions SHOULD set - // the `telemetry.distro.name` attribute to - // a string starting with `opentelemetry-`, e.g. - // `opentelemetry-java-instrumentation`. - TelemetryDistroNameKey = attribute.Key("telemetry.distro.name") - - // TelemetryDistroVersionKey is the attribute Key conforming to the - // "telemetry.distro.version" semantic conventions. It represents the - // version string of the auto instrumentation agent or distribution, if - // used. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1.2.3' - TelemetryDistroVersionKey = attribute.Key("telemetry.distro.version") -) - -var ( - // cpp - TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") - // dotnet - TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") - // erlang - TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") - // go - TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") - // java - TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") - // nodejs - TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") - // php - TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") - // python - TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") - // ruby - TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") - // rust - TelemetrySDKLanguageRust = TelemetrySDKLanguageKey.String("rust") - // swift - TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") - // webjs - TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") -) - -// TelemetrySDKName returns an attribute KeyValue conforming to the -// "telemetry.sdk.name" semantic conventions. It represents the name of the -// telemetry SDK as defined above. -func TelemetrySDKName(val string) attribute.KeyValue { - return TelemetrySDKNameKey.String(val) -} - -// TelemetrySDKVersion returns an attribute KeyValue conforming to the -// "telemetry.sdk.version" semantic conventions. It represents the version -// string of the telemetry SDK. -func TelemetrySDKVersion(val string) attribute.KeyValue { - return TelemetrySDKVersionKey.String(val) -} - -// TelemetryDistroName returns an attribute KeyValue conforming to the -// "telemetry.distro.name" semantic conventions. It represents the name of the -// auto instrumentation agent or distribution, if used. 
-func TelemetryDistroName(val string) attribute.KeyValue { - return TelemetryDistroNameKey.String(val) -} - -// TelemetryDistroVersion returns an attribute KeyValue conforming to the -// "telemetry.distro.version" semantic conventions. It represents the version -// string of the auto instrumentation agent or distribution, if used. -func TelemetryDistroVersion(val string) attribute.KeyValue { - return TelemetryDistroVersionKey.String(val) -} - -// These attributes may be used for any operation to store information about a -// thread that started a span. -const ( - // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic - // conventions. It represents the current "managed" thread ID (as opposed - // to OS thread ID). - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 42 - ThreadIDKey = attribute.Key("thread.id") - - // ThreadNameKey is the attribute Key conforming to the "thread.name" - // semantic conventions. It represents the current thread name. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'main' - ThreadNameKey = attribute.Key("thread.name") -) - -// ThreadID returns an attribute KeyValue conforming to the "thread.id" -// semantic conventions. It represents the current "managed" thread ID (as -// opposed to OS thread ID). -func ThreadID(val int) attribute.KeyValue { - return ThreadIDKey.Int(val) -} - -// ThreadName returns an attribute KeyValue conforming to the "thread.name" -// semantic conventions. It represents the current thread name. -func ThreadName(val string) attribute.KeyValue { - return ThreadNameKey.String(val) -} - -// Semantic convention attributes in the TLS namespace. -const ( - // TLSCipherKey is the attribute Key conforming to the "tls.cipher" - // semantic conventions. It represents the string indicating the - // [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5) - // used during the current connection. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'TLS_RSA_WITH_3DES_EDE_CBC_SHA', - // 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' - // Note: The values allowed for `tls.cipher` MUST be one of the - // `Descriptions` of the [registered TLS Cipher - // Suits](https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#table-tls-parameters-4). - TLSCipherKey = attribute.Key("tls.cipher") - - // TLSClientCertificateKey is the attribute Key conforming to the - // "tls.client.certificate" semantic conventions. It represents the - // pEM-encoded stand-alone certificate offered by the client. This is - // usually mutually-exclusive of `client.certificate_chain` since this - // value also exists in that list. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'MII...' - TLSClientCertificateKey = attribute.Key("tls.client.certificate") - - // TLSClientCertificateChainKey is the attribute Key conforming to the - // "tls.client.certificate_chain" semantic conventions. It represents the - // array of PEM-encoded certificates that make up the certificate chain - // offered by the client. This is usually mutually-exclusive of - // `client.certificate` since that value should be the first certificate in - // the chain. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'MII...', 'MI...' - TLSClientCertificateChainKey = attribute.Key("tls.client.certificate_chain") - - // TLSClientHashMd5Key is the attribute Key conforming to the - // "tls.client.hash.md5" semantic conventions. It represents the - // certificate fingerprint using the MD5 digest of DER-encoded version of - // certificate offered by the client. For consistency with other hash - // values, this value should be formatted as an uppercase hash. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC' - TLSClientHashMd5Key = attribute.Key("tls.client.hash.md5") - - // TLSClientHashSha1Key is the attribute Key conforming to the - // "tls.client.hash.sha1" semantic conventions. It represents the - // certificate fingerprint using the SHA1 digest of DER-encoded version of - // certificate offered by the client. For consistency with other hash - // values, this value should be formatted as an uppercase hash. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A' - TLSClientHashSha1Key = attribute.Key("tls.client.hash.sha1") - - // TLSClientHashSha256Key is the attribute Key conforming to the - // "tls.client.hash.sha256" semantic conventions. It represents the - // certificate fingerprint using the SHA256 digest of DER-encoded version - // of certificate offered by the client. For consistency with other hash - // values, this value should be formatted as an uppercase hash. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0' - TLSClientHashSha256Key = attribute.Key("tls.client.hash.sha256") - - // TLSClientIssuerKey is the attribute Key conforming to the - // "tls.client.issuer" semantic conventions. It represents the - // distinguished name of - // [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) - // of the issuer of the x.509 certificate presented by the client. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example, - // DC=com' - TLSClientIssuerKey = attribute.Key("tls.client.issuer") - - // TLSClientJa3Key is the attribute Key conforming to the "tls.client.ja3" - // semantic conventions. 
It represents a hash that identifies clients based - // on how they perform an SSL/TLS handshake. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'd4e5b18d6b55c71272893221c96ba240' - TLSClientJa3Key = attribute.Key("tls.client.ja3") - - // TLSClientNotAfterKey is the attribute Key conforming to the - // "tls.client.not_after" semantic conventions. It represents the date/Time - // indicating when client certificate is no longer considered valid. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2021-01-01T00:00:00.000Z' - TLSClientNotAfterKey = attribute.Key("tls.client.not_after") - - // TLSClientNotBeforeKey is the attribute Key conforming to the - // "tls.client.not_before" semantic conventions. It represents the - // date/Time indicating when client certificate is first considered valid. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1970-01-01T00:00:00.000Z' - TLSClientNotBeforeKey = attribute.Key("tls.client.not_before") - - // TLSClientServerNameKey is the attribute Key conforming to the - // "tls.client.server_name" semantic conventions. It represents the also - // called an SNI, this tells the server which hostname to which the client - // is attempting to connect to. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry.io' - TLSClientServerNameKey = attribute.Key("tls.client.server_name") - - // TLSClientSubjectKey is the attribute Key conforming to the - // "tls.client.subject" semantic conventions. It represents the - // distinguished name of subject of the x.509 certificate presented by the - // client. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'CN=myclient, OU=Documentation Team, DC=example, DC=com' - TLSClientSubjectKey = attribute.Key("tls.client.subject") - - // TLSClientSupportedCiphersKey is the attribute Key conforming to the - // "tls.client.supported_ciphers" semantic conventions. It represents the - // array of ciphers offered by the client during the client hello. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", - // "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "..."' - TLSClientSupportedCiphersKey = attribute.Key("tls.client.supported_ciphers") - - // TLSCurveKey is the attribute Key conforming to the "tls.curve" semantic - // conventions. It represents the string indicating the curve used for the - // given cipher, when applicable - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'secp256r1' - TLSCurveKey = attribute.Key("tls.curve") - - // TLSEstablishedKey is the attribute Key conforming to the - // "tls.established" semantic conventions. It represents the boolean flag - // indicating if the TLS negotiation was successful and transitioned to an - // encrypted tunnel. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - // Examples: True - TLSEstablishedKey = attribute.Key("tls.established") - - // TLSNextProtocolKey is the attribute Key conforming to the - // "tls.next_protocol" semantic conventions. It represents the string - // indicating the protocol being tunneled. Per the values in the [IANA - // registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids), - // this string should be lower case. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'http/1.1' - TLSNextProtocolKey = attribute.Key("tls.next_protocol") - - // TLSProtocolNameKey is the attribute Key conforming to the - // "tls.protocol.name" semantic conventions. It represents the normalized - // lowercase protocol name parsed from original string of the negotiated - // [SSL/TLS protocol - // version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES) - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - TLSProtocolNameKey = attribute.Key("tls.protocol.name") - - // TLSProtocolVersionKey is the attribute Key conforming to the - // "tls.protocol.version" semantic conventions. It represents the numeric - // part of the version parsed from the original string of the negotiated - // [SSL/TLS protocol - // version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES) - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1.2', '3' - TLSProtocolVersionKey = attribute.Key("tls.protocol.version") - - // TLSResumedKey is the attribute Key conforming to the "tls.resumed" - // semantic conventions. It represents the boolean flag indicating if this - // TLS connection was resumed from an existing TLS negotiation. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - // Examples: True - TLSResumedKey = attribute.Key("tls.resumed") - - // TLSServerCertificateKey is the attribute Key conforming to the - // "tls.server.certificate" semantic conventions. It represents the - // pEM-encoded stand-alone certificate offered by the server. This is - // usually mutually-exclusive of `server.certificate_chain` since this - // value also exists in that list. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'MII...' 
- TLSServerCertificateKey = attribute.Key("tls.server.certificate") - - // TLSServerCertificateChainKey is the attribute Key conforming to the - // "tls.server.certificate_chain" semantic conventions. It represents the - // array of PEM-encoded certificates that make up the certificate chain - // offered by the server. This is usually mutually-exclusive of - // `server.certificate` since that value should be the first certificate in - // the chain. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'MII...', 'MI...' - TLSServerCertificateChainKey = attribute.Key("tls.server.certificate_chain") - - // TLSServerHashMd5Key is the attribute Key conforming to the - // "tls.server.hash.md5" semantic conventions. It represents the - // certificate fingerprint using the MD5 digest of DER-encoded version of - // certificate offered by the server. For consistency with other hash - // values, this value should be formatted as an uppercase hash. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC' - TLSServerHashMd5Key = attribute.Key("tls.server.hash.md5") - - // TLSServerHashSha1Key is the attribute Key conforming to the - // "tls.server.hash.sha1" semantic conventions. It represents the - // certificate fingerprint using the SHA1 digest of DER-encoded version of - // certificate offered by the server. For consistency with other hash - // values, this value should be formatted as an uppercase hash. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A' - TLSServerHashSha1Key = attribute.Key("tls.server.hash.sha1") - - // TLSServerHashSha256Key is the attribute Key conforming to the - // "tls.server.hash.sha256" semantic conventions. 
It represents the - // certificate fingerprint using the SHA256 digest of DER-encoded version - // of certificate offered by the server. For consistency with other hash - // values, this value should be formatted as an uppercase hash. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0' - TLSServerHashSha256Key = attribute.Key("tls.server.hash.sha256") - - // TLSServerIssuerKey is the attribute Key conforming to the - // "tls.server.issuer" semantic conventions. It represents the - // distinguished name of - // [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) - // of the issuer of the x.509 certificate presented by the client. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example, - // DC=com' - TLSServerIssuerKey = attribute.Key("tls.server.issuer") - - // TLSServerJa3sKey is the attribute Key conforming to the - // "tls.server.ja3s" semantic conventions. It represents a hash that - // identifies servers based on how they perform an SSL/TLS handshake. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'd4e5b18d6b55c71272893221c96ba240' - TLSServerJa3sKey = attribute.Key("tls.server.ja3s") - - // TLSServerNotAfterKey is the attribute Key conforming to the - // "tls.server.not_after" semantic conventions. It represents the date/Time - // indicating when server certificate is no longer considered valid. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2021-01-01T00:00:00.000Z' - TLSServerNotAfterKey = attribute.Key("tls.server.not_after") - - // TLSServerNotBeforeKey is the attribute Key conforming to the - // "tls.server.not_before" semantic conventions. 
It represents the - // date/Time indicating when server certificate is first considered valid. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1970-01-01T00:00:00.000Z' - TLSServerNotBeforeKey = attribute.Key("tls.server.not_before") - - // TLSServerSubjectKey is the attribute Key conforming to the - // "tls.server.subject" semantic conventions. It represents the - // distinguished name of subject of the x.509 certificate presented by the - // server. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'CN=myserver, OU=Documentation Team, DC=example, DC=com' - TLSServerSubjectKey = attribute.Key("tls.server.subject") -) - -var ( - // ssl - TLSProtocolNameSsl = TLSProtocolNameKey.String("ssl") - // tls - TLSProtocolNameTLS = TLSProtocolNameKey.String("tls") -) - -// TLSCipher returns an attribute KeyValue conforming to the "tls.cipher" -// semantic conventions. It represents the string indicating the -// [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5) used -// during the current connection. -func TLSCipher(val string) attribute.KeyValue { - return TLSCipherKey.String(val) -} - -// TLSClientCertificate returns an attribute KeyValue conforming to the -// "tls.client.certificate" semantic conventions. It represents the pEM-encoded -// stand-alone certificate offered by the client. This is usually -// mutually-exclusive of `client.certificate_chain` since this value also -// exists in that list. -func TLSClientCertificate(val string) attribute.KeyValue { - return TLSClientCertificateKey.String(val) -} - -// TLSClientCertificateChain returns an attribute KeyValue conforming to the -// "tls.client.certificate_chain" semantic conventions. It represents the array -// of PEM-encoded certificates that make up the certificate chain offered by -// the client. 
This is usually mutually-exclusive of `client.certificate` since -// that value should be the first certificate in the chain. -func TLSClientCertificateChain(val ...string) attribute.KeyValue { - return TLSClientCertificateChainKey.StringSlice(val) -} - -// TLSClientHashMd5 returns an attribute KeyValue conforming to the -// "tls.client.hash.md5" semantic conventions. It represents the certificate -// fingerprint using the MD5 digest of DER-encoded version of certificate -// offered by the client. For consistency with other hash values, this value -// should be formatted as an uppercase hash. -func TLSClientHashMd5(val string) attribute.KeyValue { - return TLSClientHashMd5Key.String(val) -} - -// TLSClientHashSha1 returns an attribute KeyValue conforming to the -// "tls.client.hash.sha1" semantic conventions. It represents the certificate -// fingerprint using the SHA1 digest of DER-encoded version of certificate -// offered by the client. For consistency with other hash values, this value -// should be formatted as an uppercase hash. -func TLSClientHashSha1(val string) attribute.KeyValue { - return TLSClientHashSha1Key.String(val) -} - -// TLSClientHashSha256 returns an attribute KeyValue conforming to the -// "tls.client.hash.sha256" semantic conventions. It represents the certificate -// fingerprint using the SHA256 digest of DER-encoded version of certificate -// offered by the client. For consistency with other hash values, this value -// should be formatted as an uppercase hash. -func TLSClientHashSha256(val string) attribute.KeyValue { - return TLSClientHashSha256Key.String(val) -} - -// TLSClientIssuer returns an attribute KeyValue conforming to the -// "tls.client.issuer" semantic conventions. It represents the distinguished -// name of -// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of -// the issuer of the x.509 certificate presented by the client. 
-func TLSClientIssuer(val string) attribute.KeyValue { - return TLSClientIssuerKey.String(val) -} - -// TLSClientJa3 returns an attribute KeyValue conforming to the -// "tls.client.ja3" semantic conventions. It represents a hash that identifies -// clients based on how they perform an SSL/TLS handshake. -func TLSClientJa3(val string) attribute.KeyValue { - return TLSClientJa3Key.String(val) -} - -// TLSClientNotAfter returns an attribute KeyValue conforming to the -// "tls.client.not_after" semantic conventions. It represents the date/Time -// indicating when client certificate is no longer considered valid. -func TLSClientNotAfter(val string) attribute.KeyValue { - return TLSClientNotAfterKey.String(val) -} - -// TLSClientNotBefore returns an attribute KeyValue conforming to the -// "tls.client.not_before" semantic conventions. It represents the date/Time -// indicating when client certificate is first considered valid. -func TLSClientNotBefore(val string) attribute.KeyValue { - return TLSClientNotBeforeKey.String(val) -} - -// TLSClientServerName returns an attribute KeyValue conforming to the -// "tls.client.server_name" semantic conventions. It represents the also called -// an SNI, this tells the server which hostname to which the client is -// attempting to connect to. -func TLSClientServerName(val string) attribute.KeyValue { - return TLSClientServerNameKey.String(val) -} - -// TLSClientSubject returns an attribute KeyValue conforming to the -// "tls.client.subject" semantic conventions. It represents the distinguished -// name of subject of the x.509 certificate presented by the client. -func TLSClientSubject(val string) attribute.KeyValue { - return TLSClientSubjectKey.String(val) -} - -// TLSClientSupportedCiphers returns an attribute KeyValue conforming to the -// "tls.client.supported_ciphers" semantic conventions. It represents the array -// of ciphers offered by the client during the client hello. 
-func TLSClientSupportedCiphers(val ...string) attribute.KeyValue { - return TLSClientSupportedCiphersKey.StringSlice(val) -} - -// TLSCurve returns an attribute KeyValue conforming to the "tls.curve" -// semantic conventions. It represents the string indicating the curve used for -// the given cipher, when applicable -func TLSCurve(val string) attribute.KeyValue { - return TLSCurveKey.String(val) -} - -// TLSEstablished returns an attribute KeyValue conforming to the -// "tls.established" semantic conventions. It represents the boolean flag -// indicating if the TLS negotiation was successful and transitioned to an -// encrypted tunnel. -func TLSEstablished(val bool) attribute.KeyValue { - return TLSEstablishedKey.Bool(val) -} - -// TLSNextProtocol returns an attribute KeyValue conforming to the -// "tls.next_protocol" semantic conventions. It represents the string -// indicating the protocol being tunneled. Per the values in the [IANA -// registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids), -// this string should be lower case. -func TLSNextProtocol(val string) attribute.KeyValue { - return TLSNextProtocolKey.String(val) -} - -// TLSProtocolVersion returns an attribute KeyValue conforming to the -// "tls.protocol.version" semantic conventions. It represents the numeric part -// of the version parsed from the original string of the negotiated [SSL/TLS -// protocol -// version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES) -func TLSProtocolVersion(val string) attribute.KeyValue { - return TLSProtocolVersionKey.String(val) -} - -// TLSResumed returns an attribute KeyValue conforming to the "tls.resumed" -// semantic conventions. It represents the boolean flag indicating if this TLS -// connection was resumed from an existing TLS negotiation. 
-func TLSResumed(val bool) attribute.KeyValue { - return TLSResumedKey.Bool(val) -} - -// TLSServerCertificate returns an attribute KeyValue conforming to the -// "tls.server.certificate" semantic conventions. It represents the pEM-encoded -// stand-alone certificate offered by the server. This is usually -// mutually-exclusive of `server.certificate_chain` since this value also -// exists in that list. -func TLSServerCertificate(val string) attribute.KeyValue { - return TLSServerCertificateKey.String(val) -} - -// TLSServerCertificateChain returns an attribute KeyValue conforming to the -// "tls.server.certificate_chain" semantic conventions. It represents the array -// of PEM-encoded certificates that make up the certificate chain offered by -// the server. This is usually mutually-exclusive of `server.certificate` since -// that value should be the first certificate in the chain. -func TLSServerCertificateChain(val ...string) attribute.KeyValue { - return TLSServerCertificateChainKey.StringSlice(val) -} - -// TLSServerHashMd5 returns an attribute KeyValue conforming to the -// "tls.server.hash.md5" semantic conventions. It represents the certificate -// fingerprint using the MD5 digest of DER-encoded version of certificate -// offered by the server. For consistency with other hash values, this value -// should be formatted as an uppercase hash. -func TLSServerHashMd5(val string) attribute.KeyValue { - return TLSServerHashMd5Key.String(val) -} - -// TLSServerHashSha1 returns an attribute KeyValue conforming to the -// "tls.server.hash.sha1" semantic conventions. It represents the certificate -// fingerprint using the SHA1 digest of DER-encoded version of certificate -// offered by the server. For consistency with other hash values, this value -// should be formatted as an uppercase hash. 
-func TLSServerHashSha1(val string) attribute.KeyValue { - return TLSServerHashSha1Key.String(val) -} - -// TLSServerHashSha256 returns an attribute KeyValue conforming to the -// "tls.server.hash.sha256" semantic conventions. It represents the certificate -// fingerprint using the SHA256 digest of DER-encoded version of certificate -// offered by the server. For consistency with other hash values, this value -// should be formatted as an uppercase hash. -func TLSServerHashSha256(val string) attribute.KeyValue { - return TLSServerHashSha256Key.String(val) -} - -// TLSServerIssuer returns an attribute KeyValue conforming to the -// "tls.server.issuer" semantic conventions. It represents the distinguished -// name of -// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of -// the issuer of the x.509 certificate presented by the client. -func TLSServerIssuer(val string) attribute.KeyValue { - return TLSServerIssuerKey.String(val) -} - -// TLSServerJa3s returns an attribute KeyValue conforming to the -// "tls.server.ja3s" semantic conventions. It represents a hash that identifies -// servers based on how they perform an SSL/TLS handshake. -func TLSServerJa3s(val string) attribute.KeyValue { - return TLSServerJa3sKey.String(val) -} - -// TLSServerNotAfter returns an attribute KeyValue conforming to the -// "tls.server.not_after" semantic conventions. It represents the date/Time -// indicating when server certificate is no longer considered valid. -func TLSServerNotAfter(val string) attribute.KeyValue { - return TLSServerNotAfterKey.String(val) -} - -// TLSServerNotBefore returns an attribute KeyValue conforming to the -// "tls.server.not_before" semantic conventions. It represents the date/Time -// indicating when server certificate is first considered valid. 
-func TLSServerNotBefore(val string) attribute.KeyValue { - return TLSServerNotBeforeKey.String(val) -} - -// TLSServerSubject returns an attribute KeyValue conforming to the -// "tls.server.subject" semantic conventions. It represents the distinguished -// name of subject of the x.509 certificate presented by the server. -func TLSServerSubject(val string) attribute.KeyValue { - return TLSServerSubjectKey.String(val) -} - -// Attributes describing URL. -const ( - // URLDomainKey is the attribute Key conforming to the "url.domain" - // semantic conventions. It represents the domain extracted from the - // `url.full`, such as "opentelemetry.io". - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'www.foo.bar', 'opentelemetry.io', '3.12.167.2', - // '[1080:0:0:0:8:800:200C:417A]' - // Note: In some cases a URL may refer to an IP and/or port directly, - // without a domain name. In this case, the IP address would go to the - // domain field. If the URL contains a [literal IPv6 - // address](https://www.rfc-editor.org/rfc/rfc2732#section-2) enclosed by - // `[` and `]`, the `[` and `]` characters should also be captured in the - // domain field. - URLDomainKey = attribute.Key("url.domain") - - // URLExtensionKey is the attribute Key conforming to the "url.extension" - // semantic conventions. It represents the file extension extracted from - // the `url.full`, excluding the leading dot. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'png', 'gz' - // Note: The file extension is only set if it exists, as not every url has - // a file extension. When the file name has multiple extensions - // `example.tar.gz`, only the last one should be captured `gz`, not - // `tar.gz`. - URLExtensionKey = attribute.Key("url.extension") - - // URLFragmentKey is the attribute Key conforming to the "url.fragment" - // semantic conventions. 
It represents the [URI - // fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'SemConv' - URLFragmentKey = attribute.Key("url.fragment") - - // URLFullKey is the attribute Key conforming to the "url.full" semantic - // conventions. It represents the absolute URL describing a network - // resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986) - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv', - // '//localhost' - // Note: For network calls, URL usually has - // `scheme://host[:port][path][?query][#fragment]` format, where the - // fragment is not transmitted over HTTP, but if it is known, it SHOULD be - // included nevertheless. - // `url.full` MUST NOT contain credentials passed via URL in form of - // `https://username:password@www.example.com/`. In such case username and - // password SHOULD be redacted and attribute's value SHOULD be - // `https://REDACTED:REDACTED@www.example.com/`. - // `url.full` SHOULD capture the absolute URL when it is available (or can - // be reconstructed). Sensitive content provided in `url.full` SHOULD be - // scrubbed when instrumentations can identify it. - URLFullKey = attribute.Key("url.full") - - // URLOriginalKey is the attribute Key conforming to the "url.original" - // semantic conventions. It represents the unmodified original URL as seen - // in the event source. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv', - // 'search?q=OpenTelemetry' - // Note: In network monitoring, the observed URL may be a full URL, whereas - // in access logs, the URL is often just represented as a path. This field - // is meant to represent the URL as it was observed, complete or not. 
- // `url.original` might contain credentials passed via URL in form of - // `https://username:password@www.example.com/`. In such case password and - // username SHOULD NOT be redacted and attribute's value SHOULD remain the - // same. - URLOriginalKey = attribute.Key("url.original") - - // URLPathKey is the attribute Key conforming to the "url.path" semantic - // conventions. It represents the [URI - // path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '/search' - // Note: Sensitive content provided in `url.path` SHOULD be scrubbed when - // instrumentations can identify it. - URLPathKey = attribute.Key("url.path") - - // URLPortKey is the attribute Key conforming to the "url.port" semantic - // conventions. It represents the port extracted from the `url.full` - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 443 - URLPortKey = attribute.Key("url.port") - - // URLQueryKey is the attribute Key conforming to the "url.query" semantic - // conventions. It represents the [URI - // query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'q=OpenTelemetry' - // Note: Sensitive content provided in `url.query` SHOULD be scrubbed when - // instrumentations can identify it. - URLQueryKey = attribute.Key("url.query") - - // URLRegisteredDomainKey is the attribute Key conforming to the - // "url.registered_domain" semantic conventions. It represents the highest - // registered url domain, stripped of the subdomain. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'example.com', 'foo.co.uk' - // Note: This value can be determined precisely with the [public suffix - // list](http://publicsuffix.org). 
For example, the registered domain for - // `foo.example.com` is `example.com`. Trying to approximate this by simply - // taking the last two labels will not work well for TLDs such as `co.uk`. - URLRegisteredDomainKey = attribute.Key("url.registered_domain") - - // URLSchemeKey is the attribute Key conforming to the "url.scheme" - // semantic conventions. It represents the [URI - // scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component - // identifying the used protocol. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'https', 'ftp', 'telnet' - URLSchemeKey = attribute.Key("url.scheme") - - // URLSubdomainKey is the attribute Key conforming to the "url.subdomain" - // semantic conventions. It represents the subdomain portion of a fully - // qualified domain name includes all of the names except the host name - // under the registered_domain. In a partially qualified domain, or if the - // qualification level of the full name cannot be determined, subdomain - // contains all of the names below the registered domain. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'east', 'sub2.sub1' - // Note: The subdomain portion of `www.east.mydomain.co.uk` is `east`. If - // the domain has multiple levels of subdomain, such as - // `sub2.sub1.example.com`, the subdomain field should contain `sub2.sub1`, - // with no trailing period. - URLSubdomainKey = attribute.Key("url.subdomain") - - // URLTemplateKey is the attribute Key conforming to the "url.template" - // semantic conventions. It represents the low-cardinality template of an - // [absolute path - // reference](https://www.rfc-editor.org/rfc/rfc3986#section-4.2). 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/users/{id}', '/users/:id', '/users?id={id}' - URLTemplateKey = attribute.Key("url.template") - - // URLTopLevelDomainKey is the attribute Key conforming to the - // "url.top_level_domain" semantic conventions. It represents the effective - // top level domain (eTLD), also known as the domain suffix, is the last - // part of the domain name. For example, the top level domain for - // example.com is `com`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'com', 'co.uk' - // Note: This value can be determined precisely with the [public suffix - // list](http://publicsuffix.org). - URLTopLevelDomainKey = attribute.Key("url.top_level_domain") -) - -// URLDomain returns an attribute KeyValue conforming to the "url.domain" -// semantic conventions. It represents the domain extracted from the -// `url.full`, such as "opentelemetry.io". -func URLDomain(val string) attribute.KeyValue { - return URLDomainKey.String(val) -} - -// URLExtension returns an attribute KeyValue conforming to the -// "url.extension" semantic conventions. It represents the file extension -// extracted from the `url.full`, excluding the leading dot. -func URLExtension(val string) attribute.KeyValue { - return URLExtensionKey.String(val) -} - -// URLFragment returns an attribute KeyValue conforming to the -// "url.fragment" semantic conventions. It represents the [URI -// fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component -func URLFragment(val string) attribute.KeyValue { - return URLFragmentKey.String(val) -} - -// URLFull returns an attribute KeyValue conforming to the "url.full" -// semantic conventions. 
It represents the absolute URL describing a network -// resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986) -func URLFull(val string) attribute.KeyValue { - return URLFullKey.String(val) -} - -// URLOriginal returns an attribute KeyValue conforming to the -// "url.original" semantic conventions. It represents the unmodified original -// URL as seen in the event source. -func URLOriginal(val string) attribute.KeyValue { - return URLOriginalKey.String(val) -} - -// URLPath returns an attribute KeyValue conforming to the "url.path" -// semantic conventions. It represents the [URI -// path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component -func URLPath(val string) attribute.KeyValue { - return URLPathKey.String(val) -} - -// URLPort returns an attribute KeyValue conforming to the "url.port" -// semantic conventions. It represents the port extracted from the `url.full` -func URLPort(val int) attribute.KeyValue { - return URLPortKey.Int(val) -} - -// URLQuery returns an attribute KeyValue conforming to the "url.query" -// semantic conventions. It represents the [URI -// query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component -func URLQuery(val string) attribute.KeyValue { - return URLQueryKey.String(val) -} - -// URLRegisteredDomain returns an attribute KeyValue conforming to the -// "url.registered_domain" semantic conventions. It represents the highest -// registered url domain, stripped of the subdomain. -func URLRegisteredDomain(val string) attribute.KeyValue { - return URLRegisteredDomainKey.String(val) -} - -// URLScheme returns an attribute KeyValue conforming to the "url.scheme" -// semantic conventions. It represents the [URI -// scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component -// identifying the used protocol. 
-func URLScheme(val string) attribute.KeyValue { - return URLSchemeKey.String(val) -} - -// URLSubdomain returns an attribute KeyValue conforming to the -// "url.subdomain" semantic conventions. It represents the subdomain portion of -// a fully qualified domain name includes all of the names except the host name -// under the registered_domain. In a partially qualified domain, or if the -// qualification level of the full name cannot be determined, subdomain -// contains all of the names below the registered domain. -func URLSubdomain(val string) attribute.KeyValue { - return URLSubdomainKey.String(val) -} - -// URLTemplate returns an attribute KeyValue conforming to the -// "url.template" semantic conventions. It represents the low-cardinality -// template of an [absolute path -// reference](https://www.rfc-editor.org/rfc/rfc3986#section-4.2). -func URLTemplate(val string) attribute.KeyValue { - return URLTemplateKey.String(val) -} - -// URLTopLevelDomain returns an attribute KeyValue conforming to the -// "url.top_level_domain" semantic conventions. It represents the effective top -// level domain (eTLD), also known as the domain suffix, is the last part of -// the domain name. For example, the top level domain for example.com is `com`. -func URLTopLevelDomain(val string) attribute.KeyValue { - return URLTopLevelDomainKey.String(val) -} - -// Describes user-agent attributes. -const ( - // UserAgentNameKey is the attribute Key conforming to the - // "user_agent.name" semantic conventions. It represents the name of the - // user-agent extracted from original. Usually refers to the browser's - // name. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Safari', 'YourApp' - // Note: [Example](https://www.whatsmyua.info) of extracting browser's name - // from original string. 
In the case of using a user-agent for non-browser - // products, such as microservices with multiple names/versions inside the - // `user_agent.original`, the most significant name SHOULD be selected. In - // such a scenario it should align with `user_agent.version` - UserAgentNameKey = attribute.Key("user_agent.name") - - // UserAgentOriginalKey is the attribute Key conforming to the - // "user_agent.original" semantic conventions. It represents the value of - // the [HTTP - // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) - // header sent by the client. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'CERN-LineMode/2.15 libwww/2.17b3', 'Mozilla/5.0 (iPhone; CPU - // iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) - // Version/14.1.2 Mobile/15E148 Safari/604.1', 'YourApp/1.0.0 - // grpc-java-okhttp/1.27.2' - UserAgentOriginalKey = attribute.Key("user_agent.original") - - // UserAgentVersionKey is the attribute Key conforming to the - // "user_agent.version" semantic conventions. It represents the version of - // the user-agent extracted from original. Usually refers to the browser's - // version - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '14.1.2', '1.0.0' - // Note: [Example](https://www.whatsmyua.info) of extracting browser's - // version from original string. In the case of using a user-agent for - // non-browser products, such as microservices with multiple names/versions - // inside the `user_agent.original`, the most significant version SHOULD be - // selected. In such a scenario it should align with `user_agent.name` - UserAgentVersionKey = attribute.Key("user_agent.version") -) - -// UserAgentName returns an attribute KeyValue conforming to the -// "user_agent.name" semantic conventions. It represents the name of the -// user-agent extracted from original. Usually refers to the browser's name. 
-func UserAgentName(val string) attribute.KeyValue { - return UserAgentNameKey.String(val) -} - -// UserAgentOriginal returns an attribute KeyValue conforming to the -// "user_agent.original" semantic conventions. It represents the value of the -// [HTTP -// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) -// header sent by the client. -func UserAgentOriginal(val string) attribute.KeyValue { - return UserAgentOriginalKey.String(val) -} - -// UserAgentVersion returns an attribute KeyValue conforming to the -// "user_agent.version" semantic conventions. It represents the version of the -// user-agent extracted from original. Usually refers to the browser's version -func UserAgentVersion(val string) attribute.KeyValue { - return UserAgentVersionKey.String(val) -} - -// The attributes used to describe the packaged software running the -// application code. -const ( - // WebEngineDescriptionKey is the attribute Key conforming to the - // "webengine.description" semantic conventions. It represents the - // additional description of the web engine (e.g. detailed version and - // edition information). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - - // 2.2.2.Final' - WebEngineDescriptionKey = attribute.Key("webengine.description") - - // WebEngineNameKey is the attribute Key conforming to the "webengine.name" - // semantic conventions. It represents the name of the web engine. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'WildFly' - WebEngineNameKey = attribute.Key("webengine.name") - - // WebEngineVersionKey is the attribute Key conforming to the - // "webengine.version" semantic conventions. It represents the version of - // the web engine. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '21.0.0' - WebEngineVersionKey = attribute.Key("webengine.version") -) - -// WebEngineDescription returns an attribute KeyValue conforming to the -// "webengine.description" semantic conventions. It represents the additional -// description of the web engine (e.g. detailed version and edition -// information). -func WebEngineDescription(val string) attribute.KeyValue { - return WebEngineDescriptionKey.String(val) -} - -// WebEngineName returns an attribute KeyValue conforming to the -// "webengine.name" semantic conventions. It represents the name of the web -// engine. -func WebEngineName(val string) attribute.KeyValue { - return WebEngineNameKey.String(val) -} - -// WebEngineVersion returns an attribute KeyValue conforming to the -// "webengine.version" semantic conventions. It represents the version of the -// web engine. -func WebEngineVersion(val string) attribute.KeyValue { - return WebEngineVersionKey.String(val) -} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go deleted file mode 100644 index d031bbea784..00000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Package semconv implements OpenTelemetry semantic conventions. -// -// OpenTelemetry semantic conventions are agreed standardized naming -// patterns for OpenTelemetry things. This package represents the v1.26.0 -// version of the OpenTelemetry semantic conventions. 
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go deleted file mode 100644 index bfaee0d56e3..00000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" - -const ( - // ExceptionEventName is the name of the Span event representing an exception. - ExceptionEventName = "exception" -) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go deleted file mode 100644 index fcdb9f48596..00000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go +++ /dev/null @@ -1,1307 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated from semantic convention specification. DO NOT EDIT. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" - -const ( - - // ContainerCPUTime is the metric conforming to the "container.cpu.time" - // semantic conventions. It represents the total CPU time consumed. - // Instrument: counter - // Unit: s - // Stability: Experimental - ContainerCPUTimeName = "container.cpu.time" - ContainerCPUTimeUnit = "s" - ContainerCPUTimeDescription = "Total CPU time consumed" - - // ContainerMemoryUsage is the metric conforming to the - // "container.memory.usage" semantic conventions. It represents the memory - // usage of the container. - // Instrument: counter - // Unit: By - // Stability: Experimental - ContainerMemoryUsageName = "container.memory.usage" - ContainerMemoryUsageUnit = "By" - ContainerMemoryUsageDescription = "Memory usage of the container." 
- - // ContainerDiskIo is the metric conforming to the "container.disk.io" semantic - // conventions. It represents the disk bytes for the container. - // Instrument: counter - // Unit: By - // Stability: Experimental - ContainerDiskIoName = "container.disk.io" - ContainerDiskIoUnit = "By" - ContainerDiskIoDescription = "Disk bytes for the container." - - // ContainerNetworkIo is the metric conforming to the "container.network.io" - // semantic conventions. It represents the network bytes for the container. - // Instrument: counter - // Unit: By - // Stability: Experimental - ContainerNetworkIoName = "container.network.io" - ContainerNetworkIoUnit = "By" - ContainerNetworkIoDescription = "Network bytes for the container." - - // DBClientOperationDuration is the metric conforming to the - // "db.client.operation.duration" semantic conventions. It represents the - // duration of database client operations. - // Instrument: histogram - // Unit: s - // Stability: Experimental - DBClientOperationDurationName = "db.client.operation.duration" - DBClientOperationDurationUnit = "s" - DBClientOperationDurationDescription = "Duration of database client operations." - - // DBClientConnectionCount is the metric conforming to the - // "db.client.connection.count" semantic conventions. It represents the number - // of connections that are currently in state described by the `state` - // attribute. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - DBClientConnectionCountName = "db.client.connection.count" - DBClientConnectionCountUnit = "{connection}" - DBClientConnectionCountDescription = "The number of connections that are currently in state described by the `state` attribute" - - // DBClientConnectionIdleMax is the metric conforming to the - // "db.client.connection.idle.max" semantic conventions. It represents the - // maximum number of idle open connections allowed. 
- // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - DBClientConnectionIdleMaxName = "db.client.connection.idle.max" - DBClientConnectionIdleMaxUnit = "{connection}" - DBClientConnectionIdleMaxDescription = "The maximum number of idle open connections allowed" - - // DBClientConnectionIdleMin is the metric conforming to the - // "db.client.connection.idle.min" semantic conventions. It represents the - // minimum number of idle open connections allowed. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - DBClientConnectionIdleMinName = "db.client.connection.idle.min" - DBClientConnectionIdleMinUnit = "{connection}" - DBClientConnectionIdleMinDescription = "The minimum number of idle open connections allowed" - - // DBClientConnectionMax is the metric conforming to the - // "db.client.connection.max" semantic conventions. It represents the maximum - // number of open connections allowed. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - DBClientConnectionMaxName = "db.client.connection.max" - DBClientConnectionMaxUnit = "{connection}" - DBClientConnectionMaxDescription = "The maximum number of open connections allowed" - - // DBClientConnectionPendingRequests is the metric conforming to the - // "db.client.connection.pending_requests" semantic conventions. It represents - // the number of pending requests for an open connection, cumulative for the - // entire pool. - // Instrument: updowncounter - // Unit: {request} - // Stability: Experimental - DBClientConnectionPendingRequestsName = "db.client.connection.pending_requests" - DBClientConnectionPendingRequestsUnit = "{request}" - DBClientConnectionPendingRequestsDescription = "The number of pending requests for an open connection, cumulative for the entire pool" - - // DBClientConnectionTimeouts is the metric conforming to the - // "db.client.connection.timeouts" semantic conventions. 
It represents the - // number of connection timeouts that have occurred trying to obtain a - // connection from the pool. - // Instrument: counter - // Unit: {timeout} - // Stability: Experimental - DBClientConnectionTimeoutsName = "db.client.connection.timeouts" - DBClientConnectionTimeoutsUnit = "{timeout}" - DBClientConnectionTimeoutsDescription = "The number of connection timeouts that have occurred trying to obtain a connection from the pool" - - // DBClientConnectionCreateTime is the metric conforming to the - // "db.client.connection.create_time" semantic conventions. It represents the - // time it took to create a new connection. - // Instrument: histogram - // Unit: s - // Stability: Experimental - DBClientConnectionCreateTimeName = "db.client.connection.create_time" - DBClientConnectionCreateTimeUnit = "s" - DBClientConnectionCreateTimeDescription = "The time it took to create a new connection" - - // DBClientConnectionWaitTime is the metric conforming to the - // "db.client.connection.wait_time" semantic conventions. It represents the - // time it took to obtain an open connection from the pool. - // Instrument: histogram - // Unit: s - // Stability: Experimental - DBClientConnectionWaitTimeName = "db.client.connection.wait_time" - DBClientConnectionWaitTimeUnit = "s" - DBClientConnectionWaitTimeDescription = "The time it took to obtain an open connection from the pool" - - // DBClientConnectionUseTime is the metric conforming to the - // "db.client.connection.use_time" semantic conventions. It represents the time - // between borrowing a connection and returning it to the pool. 
- // Instrument: histogram - // Unit: s - // Stability: Experimental - DBClientConnectionUseTimeName = "db.client.connection.use_time" - DBClientConnectionUseTimeUnit = "s" - DBClientConnectionUseTimeDescription = "The time between borrowing a connection and returning it to the pool" - - // DBClientConnectionsUsage is the metric conforming to the - // "db.client.connections.usage" semantic conventions. It represents the - // deprecated, use `db.client.connection.count` instead. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - DBClientConnectionsUsageName = "db.client.connections.usage" - DBClientConnectionsUsageUnit = "{connection}" - DBClientConnectionsUsageDescription = "Deprecated, use `db.client.connection.count` instead." - - // DBClientConnectionsIdleMax is the metric conforming to the - // "db.client.connections.idle.max" semantic conventions. It represents the - // deprecated, use `db.client.connection.idle.max` instead. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - DBClientConnectionsIdleMaxName = "db.client.connections.idle.max" - DBClientConnectionsIdleMaxUnit = "{connection}" - DBClientConnectionsIdleMaxDescription = "Deprecated, use `db.client.connection.idle.max` instead." - - // DBClientConnectionsIdleMin is the metric conforming to the - // "db.client.connections.idle.min" semantic conventions. It represents the - // deprecated, use `db.client.connection.idle.min` instead. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - DBClientConnectionsIdleMinName = "db.client.connections.idle.min" - DBClientConnectionsIdleMinUnit = "{connection}" - DBClientConnectionsIdleMinDescription = "Deprecated, use `db.client.connection.idle.min` instead." - - // DBClientConnectionsMax is the metric conforming to the - // "db.client.connections.max" semantic conventions. It represents the - // deprecated, use `db.client.connection.max` instead. 
- // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - DBClientConnectionsMaxName = "db.client.connections.max" - DBClientConnectionsMaxUnit = "{connection}" - DBClientConnectionsMaxDescription = "Deprecated, use `db.client.connection.max` instead." - - // DBClientConnectionsPendingRequests is the metric conforming to the - // "db.client.connections.pending_requests" semantic conventions. It represents - // the deprecated, use `db.client.connection.pending_requests` instead. - // Instrument: updowncounter - // Unit: {request} - // Stability: Experimental - DBClientConnectionsPendingRequestsName = "db.client.connections.pending_requests" - DBClientConnectionsPendingRequestsUnit = "{request}" - DBClientConnectionsPendingRequestsDescription = "Deprecated, use `db.client.connection.pending_requests` instead." - - // DBClientConnectionsTimeouts is the metric conforming to the - // "db.client.connections.timeouts" semantic conventions. It represents the - // deprecated, use `db.client.connection.timeouts` instead. - // Instrument: counter - // Unit: {timeout} - // Stability: Experimental - DBClientConnectionsTimeoutsName = "db.client.connections.timeouts" - DBClientConnectionsTimeoutsUnit = "{timeout}" - DBClientConnectionsTimeoutsDescription = "Deprecated, use `db.client.connection.timeouts` instead." - - // DBClientConnectionsCreateTime is the metric conforming to the - // "db.client.connections.create_time" semantic conventions. It represents the - // deprecated, use `db.client.connection.create_time` instead. Note: the unit - // also changed from `ms` to `s`. - // Instrument: histogram - // Unit: ms - // Stability: Experimental - DBClientConnectionsCreateTimeName = "db.client.connections.create_time" - DBClientConnectionsCreateTimeUnit = "ms" - DBClientConnectionsCreateTimeDescription = "Deprecated, use `db.client.connection.create_time` instead. Note: the unit also changed from `ms` to `s`." 
- - // DBClientConnectionsWaitTime is the metric conforming to the - // "db.client.connections.wait_time" semantic conventions. It represents the - // deprecated, use `db.client.connection.wait_time` instead. Note: the unit - // also changed from `ms` to `s`. - // Instrument: histogram - // Unit: ms - // Stability: Experimental - DBClientConnectionsWaitTimeName = "db.client.connections.wait_time" - DBClientConnectionsWaitTimeUnit = "ms" - DBClientConnectionsWaitTimeDescription = "Deprecated, use `db.client.connection.wait_time` instead. Note: the unit also changed from `ms` to `s`." - - // DBClientConnectionsUseTime is the metric conforming to the - // "db.client.connections.use_time" semantic conventions. It represents the - // deprecated, use `db.client.connection.use_time` instead. Note: the unit also - // changed from `ms` to `s`. - // Instrument: histogram - // Unit: ms - // Stability: Experimental - DBClientConnectionsUseTimeName = "db.client.connections.use_time" - DBClientConnectionsUseTimeUnit = "ms" - DBClientConnectionsUseTimeDescription = "Deprecated, use `db.client.connection.use_time` instead. Note: the unit also changed from `ms` to `s`." - - // DNSLookupDuration is the metric conforming to the "dns.lookup.duration" - // semantic conventions. It represents the measures the time taken to perform a - // DNS lookup. - // Instrument: histogram - // Unit: s - // Stability: Experimental - DNSLookupDurationName = "dns.lookup.duration" - DNSLookupDurationUnit = "s" - DNSLookupDurationDescription = "Measures the time taken to perform a DNS lookup." - - // AspnetcoreRoutingMatchAttempts is the metric conforming to the - // "aspnetcore.routing.match_attempts" semantic conventions. It represents the - // number of requests that were attempted to be matched to an endpoint. 
- // Instrument: counter - // Unit: {match_attempt} - // Stability: Stable - AspnetcoreRoutingMatchAttemptsName = "aspnetcore.routing.match_attempts" - AspnetcoreRoutingMatchAttemptsUnit = "{match_attempt}" - AspnetcoreRoutingMatchAttemptsDescription = "Number of requests that were attempted to be matched to an endpoint." - - // AspnetcoreDiagnosticsExceptions is the metric conforming to the - // "aspnetcore.diagnostics.exceptions" semantic conventions. It represents the - // number of exceptions caught by exception handling middleware. - // Instrument: counter - // Unit: {exception} - // Stability: Stable - AspnetcoreDiagnosticsExceptionsName = "aspnetcore.diagnostics.exceptions" - AspnetcoreDiagnosticsExceptionsUnit = "{exception}" - AspnetcoreDiagnosticsExceptionsDescription = "Number of exceptions caught by exception handling middleware." - - // AspnetcoreRateLimitingActiveRequestLeases is the metric conforming to the - // "aspnetcore.rate_limiting.active_request_leases" semantic conventions. It - // represents the number of requests that are currently active on the server - // that hold a rate limiting lease. - // Instrument: updowncounter - // Unit: {request} - // Stability: Stable - AspnetcoreRateLimitingActiveRequestLeasesName = "aspnetcore.rate_limiting.active_request_leases" - AspnetcoreRateLimitingActiveRequestLeasesUnit = "{request}" - AspnetcoreRateLimitingActiveRequestLeasesDescription = "Number of requests that are currently active on the server that hold a rate limiting lease." - - // AspnetcoreRateLimitingRequestLeaseDuration is the metric conforming to the - // "aspnetcore.rate_limiting.request_lease.duration" semantic conventions. It - // represents the duration of rate limiting lease held by requests on the - // server. 
- // Instrument: histogram - // Unit: s - // Stability: Stable - AspnetcoreRateLimitingRequestLeaseDurationName = "aspnetcore.rate_limiting.request_lease.duration" - AspnetcoreRateLimitingRequestLeaseDurationUnit = "s" - AspnetcoreRateLimitingRequestLeaseDurationDescription = "The duration of rate limiting lease held by requests on the server." - - // AspnetcoreRateLimitingRequestTimeInQueue is the metric conforming to the - // "aspnetcore.rate_limiting.request.time_in_queue" semantic conventions. It - // represents the time the request spent in a queue waiting to acquire a rate - // limiting lease. - // Instrument: histogram - // Unit: s - // Stability: Stable - AspnetcoreRateLimitingRequestTimeInQueueName = "aspnetcore.rate_limiting.request.time_in_queue" - AspnetcoreRateLimitingRequestTimeInQueueUnit = "s" - AspnetcoreRateLimitingRequestTimeInQueueDescription = "The time the request spent in a queue waiting to acquire a rate limiting lease." - - // AspnetcoreRateLimitingQueuedRequests is the metric conforming to the - // "aspnetcore.rate_limiting.queued_requests" semantic conventions. It - // represents the number of requests that are currently queued, waiting to - // acquire a rate limiting lease. - // Instrument: updowncounter - // Unit: {request} - // Stability: Stable - AspnetcoreRateLimitingQueuedRequestsName = "aspnetcore.rate_limiting.queued_requests" - AspnetcoreRateLimitingQueuedRequestsUnit = "{request}" - AspnetcoreRateLimitingQueuedRequestsDescription = "Number of requests that are currently queued, waiting to acquire a rate limiting lease." - - // AspnetcoreRateLimitingRequests is the metric conforming to the - // "aspnetcore.rate_limiting.requests" semantic conventions. It represents the - // number of requests that tried to acquire a rate limiting lease. 
- // Instrument: counter - // Unit: {request} - // Stability: Stable - AspnetcoreRateLimitingRequestsName = "aspnetcore.rate_limiting.requests" - AspnetcoreRateLimitingRequestsUnit = "{request}" - AspnetcoreRateLimitingRequestsDescription = "Number of requests that tried to acquire a rate limiting lease." - - // KestrelActiveConnections is the metric conforming to the - // "kestrel.active_connections" semantic conventions. It represents the number - // of connections that are currently active on the server. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Stable - KestrelActiveConnectionsName = "kestrel.active_connections" - KestrelActiveConnectionsUnit = "{connection}" - KestrelActiveConnectionsDescription = "Number of connections that are currently active on the server." - - // KestrelConnectionDuration is the metric conforming to the - // "kestrel.connection.duration" semantic conventions. It represents the - // duration of connections on the server. - // Instrument: histogram - // Unit: s - // Stability: Stable - KestrelConnectionDurationName = "kestrel.connection.duration" - KestrelConnectionDurationUnit = "s" - KestrelConnectionDurationDescription = "The duration of connections on the server." - - // KestrelRejectedConnections is the metric conforming to the - // "kestrel.rejected_connections" semantic conventions. It represents the - // number of connections rejected by the server. - // Instrument: counter - // Unit: {connection} - // Stability: Stable - KestrelRejectedConnectionsName = "kestrel.rejected_connections" - KestrelRejectedConnectionsUnit = "{connection}" - KestrelRejectedConnectionsDescription = "Number of connections rejected by the server." - - // KestrelQueuedConnections is the metric conforming to the - // "kestrel.queued_connections" semantic conventions. It represents the number - // of connections that are currently queued and are waiting to start. 
- // Instrument: updowncounter - // Unit: {connection} - // Stability: Stable - KestrelQueuedConnectionsName = "kestrel.queued_connections" - KestrelQueuedConnectionsUnit = "{connection}" - KestrelQueuedConnectionsDescription = "Number of connections that are currently queued and are waiting to start." - - // KestrelQueuedRequests is the metric conforming to the - // "kestrel.queued_requests" semantic conventions. It represents the number of - // HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are - // currently queued and are waiting to start. - // Instrument: updowncounter - // Unit: {request} - // Stability: Stable - KestrelQueuedRequestsName = "kestrel.queued_requests" - KestrelQueuedRequestsUnit = "{request}" - KestrelQueuedRequestsDescription = "Number of HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are currently queued and are waiting to start." - - // KestrelUpgradedConnections is the metric conforming to the - // "kestrel.upgraded_connections" semantic conventions. It represents the - // number of connections that are currently upgraded (WebSockets). . - // Instrument: updowncounter - // Unit: {connection} - // Stability: Stable - KestrelUpgradedConnectionsName = "kestrel.upgraded_connections" - KestrelUpgradedConnectionsUnit = "{connection}" - KestrelUpgradedConnectionsDescription = "Number of connections that are currently upgraded (WebSockets). ." - - // KestrelTLSHandshakeDuration is the metric conforming to the - // "kestrel.tls_handshake.duration" semantic conventions. It represents the - // duration of TLS handshakes on the server. - // Instrument: histogram - // Unit: s - // Stability: Stable - KestrelTLSHandshakeDurationName = "kestrel.tls_handshake.duration" - KestrelTLSHandshakeDurationUnit = "s" - KestrelTLSHandshakeDurationDescription = "The duration of TLS handshakes on the server." 
- - // KestrelActiveTLSHandshakes is the metric conforming to the - // "kestrel.active_tls_handshakes" semantic conventions. It represents the - // number of TLS handshakes that are currently in progress on the server. - // Instrument: updowncounter - // Unit: {handshake} - // Stability: Stable - KestrelActiveTLSHandshakesName = "kestrel.active_tls_handshakes" - KestrelActiveTLSHandshakesUnit = "{handshake}" - KestrelActiveTLSHandshakesDescription = "Number of TLS handshakes that are currently in progress on the server." - - // SignalrServerConnectionDuration is the metric conforming to the - // "signalr.server.connection.duration" semantic conventions. It represents the - // duration of connections on the server. - // Instrument: histogram - // Unit: s - // Stability: Stable - SignalrServerConnectionDurationName = "signalr.server.connection.duration" - SignalrServerConnectionDurationUnit = "s" - SignalrServerConnectionDurationDescription = "The duration of connections on the server." - - // SignalrServerActiveConnections is the metric conforming to the - // "signalr.server.active_connections" semantic conventions. It represents the - // number of connections that are currently active on the server. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Stable - SignalrServerActiveConnectionsName = "signalr.server.active_connections" - SignalrServerActiveConnectionsUnit = "{connection}" - SignalrServerActiveConnectionsDescription = "Number of connections that are currently active on the server." - - // FaaSInvokeDuration is the metric conforming to the "faas.invoke_duration" - // semantic conventions. It represents the measures the duration of the - // function's logic execution. 
- // Instrument: histogram - // Unit: s - // Stability: Experimental - FaaSInvokeDurationName = "faas.invoke_duration" - FaaSInvokeDurationUnit = "s" - FaaSInvokeDurationDescription = "Measures the duration of the function's logic execution" - - // FaaSInitDuration is the metric conforming to the "faas.init_duration" - // semantic conventions. It represents the measures the duration of the - // function's initialization, such as a cold start. - // Instrument: histogram - // Unit: s - // Stability: Experimental - FaaSInitDurationName = "faas.init_duration" - FaaSInitDurationUnit = "s" - FaaSInitDurationDescription = "Measures the duration of the function's initialization, such as a cold start" - - // FaaSColdstarts is the metric conforming to the "faas.coldstarts" semantic - // conventions. It represents the number of invocation cold starts. - // Instrument: counter - // Unit: {coldstart} - // Stability: Experimental - FaaSColdstartsName = "faas.coldstarts" - FaaSColdstartsUnit = "{coldstart}" - FaaSColdstartsDescription = "Number of invocation cold starts" - - // FaaSErrors is the metric conforming to the "faas.errors" semantic - // conventions. It represents the number of invocation errors. - // Instrument: counter - // Unit: {error} - // Stability: Experimental - FaaSErrorsName = "faas.errors" - FaaSErrorsUnit = "{error}" - FaaSErrorsDescription = "Number of invocation errors" - - // FaaSInvocations is the metric conforming to the "faas.invocations" semantic - // conventions. It represents the number of successful invocations. - // Instrument: counter - // Unit: {invocation} - // Stability: Experimental - FaaSInvocationsName = "faas.invocations" - FaaSInvocationsUnit = "{invocation}" - FaaSInvocationsDescription = "Number of successful invocations" - - // FaaSTimeouts is the metric conforming to the "faas.timeouts" semantic - // conventions. It represents the number of invocation timeouts. 
- // Instrument: counter - // Unit: {timeout} - // Stability: Experimental - FaaSTimeoutsName = "faas.timeouts" - FaaSTimeoutsUnit = "{timeout}" - FaaSTimeoutsDescription = "Number of invocation timeouts" - - // FaaSMemUsage is the metric conforming to the "faas.mem_usage" semantic - // conventions. It represents the distribution of max memory usage per - // invocation. - // Instrument: histogram - // Unit: By - // Stability: Experimental - FaaSMemUsageName = "faas.mem_usage" - FaaSMemUsageUnit = "By" - FaaSMemUsageDescription = "Distribution of max memory usage per invocation" - - // FaaSCPUUsage is the metric conforming to the "faas.cpu_usage" semantic - // conventions. It represents the distribution of CPU usage per invocation. - // Instrument: histogram - // Unit: s - // Stability: Experimental - FaaSCPUUsageName = "faas.cpu_usage" - FaaSCPUUsageUnit = "s" - FaaSCPUUsageDescription = "Distribution of CPU usage per invocation" - - // FaaSNetIo is the metric conforming to the "faas.net_io" semantic - // conventions. It represents the distribution of net I/O usage per invocation. - // Instrument: histogram - // Unit: By - // Stability: Experimental - FaaSNetIoName = "faas.net_io" - FaaSNetIoUnit = "By" - FaaSNetIoDescription = "Distribution of net I/O usage per invocation" - - // HTTPServerRequestDuration is the metric conforming to the - // "http.server.request.duration" semantic conventions. It represents the - // duration of HTTP server requests. - // Instrument: histogram - // Unit: s - // Stability: Stable - HTTPServerRequestDurationName = "http.server.request.duration" - HTTPServerRequestDurationUnit = "s" - HTTPServerRequestDurationDescription = "Duration of HTTP server requests." - - // HTTPServerActiveRequests is the metric conforming to the - // "http.server.active_requests" semantic conventions. It represents the number - // of active HTTP server requests. 
- // Instrument: updowncounter - // Unit: {request} - // Stability: Experimental - HTTPServerActiveRequestsName = "http.server.active_requests" - HTTPServerActiveRequestsUnit = "{request}" - HTTPServerActiveRequestsDescription = "Number of active HTTP server requests." - - // HTTPServerRequestBodySize is the metric conforming to the - // "http.server.request.body.size" semantic conventions. It represents the size - // of HTTP server request bodies. - // Instrument: histogram - // Unit: By - // Stability: Experimental - HTTPServerRequestBodySizeName = "http.server.request.body.size" - HTTPServerRequestBodySizeUnit = "By" - HTTPServerRequestBodySizeDescription = "Size of HTTP server request bodies." - - // HTTPServerResponseBodySize is the metric conforming to the - // "http.server.response.body.size" semantic conventions. It represents the - // size of HTTP server response bodies. - // Instrument: histogram - // Unit: By - // Stability: Experimental - HTTPServerResponseBodySizeName = "http.server.response.body.size" - HTTPServerResponseBodySizeUnit = "By" - HTTPServerResponseBodySizeDescription = "Size of HTTP server response bodies." - - // HTTPClientRequestDuration is the metric conforming to the - // "http.client.request.duration" semantic conventions. It represents the - // duration of HTTP client requests. - // Instrument: histogram - // Unit: s - // Stability: Stable - HTTPClientRequestDurationName = "http.client.request.duration" - HTTPClientRequestDurationUnit = "s" - HTTPClientRequestDurationDescription = "Duration of HTTP client requests." - - // HTTPClientRequestBodySize is the metric conforming to the - // "http.client.request.body.size" semantic conventions. It represents the size - // of HTTP client request bodies. 
- // Instrument: histogram - // Unit: By - // Stability: Experimental - HTTPClientRequestBodySizeName = "http.client.request.body.size" - HTTPClientRequestBodySizeUnit = "By" - HTTPClientRequestBodySizeDescription = "Size of HTTP client request bodies." - - // HTTPClientResponseBodySize is the metric conforming to the - // "http.client.response.body.size" semantic conventions. It represents the - // size of HTTP client response bodies. - // Instrument: histogram - // Unit: By - // Stability: Experimental - HTTPClientResponseBodySizeName = "http.client.response.body.size" - HTTPClientResponseBodySizeUnit = "By" - HTTPClientResponseBodySizeDescription = "Size of HTTP client response bodies." - - // HTTPClientOpenConnections is the metric conforming to the - // "http.client.open_connections" semantic conventions. It represents the - // number of outbound HTTP connections that are currently active or idle on the - // client. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - HTTPClientOpenConnectionsName = "http.client.open_connections" - HTTPClientOpenConnectionsUnit = "{connection}" - HTTPClientOpenConnectionsDescription = "Number of outbound HTTP connections that are currently active or idle on the client." - - // HTTPClientConnectionDuration is the metric conforming to the - // "http.client.connection.duration" semantic conventions. It represents the - // duration of the successfully established outbound HTTP connections. - // Instrument: histogram - // Unit: s - // Stability: Experimental - HTTPClientConnectionDurationName = "http.client.connection.duration" - HTTPClientConnectionDurationUnit = "s" - HTTPClientConnectionDurationDescription = "The duration of the successfully established outbound HTTP connections." - - // HTTPClientActiveRequests is the metric conforming to the - // "http.client.active_requests" semantic conventions. It represents the number - // of active HTTP requests. 
- // Instrument: updowncounter - // Unit: {request} - // Stability: Experimental - HTTPClientActiveRequestsName = "http.client.active_requests" - HTTPClientActiveRequestsUnit = "{request}" - HTTPClientActiveRequestsDescription = "Number of active HTTP requests." - - // JvmMemoryInit is the metric conforming to the "jvm.memory.init" semantic - // conventions. It represents the measure of initial memory requested. - // Instrument: updowncounter - // Unit: By - // Stability: Experimental - JvmMemoryInitName = "jvm.memory.init" - JvmMemoryInitUnit = "By" - JvmMemoryInitDescription = "Measure of initial memory requested." - - // JvmSystemCPUUtilization is the metric conforming to the - // "jvm.system.cpu.utilization" semantic conventions. It represents the recent - // CPU utilization for the whole system as reported by the JVM. - // Instrument: gauge - // Unit: 1 - // Stability: Experimental - JvmSystemCPUUtilizationName = "jvm.system.cpu.utilization" - JvmSystemCPUUtilizationUnit = "1" - JvmSystemCPUUtilizationDescription = "Recent CPU utilization for the whole system as reported by the JVM." - - // JvmSystemCPULoad1m is the metric conforming to the "jvm.system.cpu.load_1m" - // semantic conventions. It represents the average CPU load of the whole system - // for the last minute as reported by the JVM. - // Instrument: gauge - // Unit: {run_queue_item} - // Stability: Experimental - JvmSystemCPULoad1mName = "jvm.system.cpu.load_1m" - JvmSystemCPULoad1mUnit = "{run_queue_item}" - JvmSystemCPULoad1mDescription = "Average CPU load of the whole system for the last minute as reported by the JVM." - - // JvmBufferMemoryUsage is the metric conforming to the - // "jvm.buffer.memory.usage" semantic conventions. It represents the measure of - // memory used by buffers. 
- // Instrument: updowncounter - // Unit: By - // Stability: Experimental - JvmBufferMemoryUsageName = "jvm.buffer.memory.usage" - JvmBufferMemoryUsageUnit = "By" - JvmBufferMemoryUsageDescription = "Measure of memory used by buffers." - - // JvmBufferMemoryLimit is the metric conforming to the - // "jvm.buffer.memory.limit" semantic conventions. It represents the measure of - // total memory capacity of buffers. - // Instrument: updowncounter - // Unit: By - // Stability: Experimental - JvmBufferMemoryLimitName = "jvm.buffer.memory.limit" - JvmBufferMemoryLimitUnit = "By" - JvmBufferMemoryLimitDescription = "Measure of total memory capacity of buffers." - - // JvmBufferCount is the metric conforming to the "jvm.buffer.count" semantic - // conventions. It represents the number of buffers in the pool. - // Instrument: updowncounter - // Unit: {buffer} - // Stability: Experimental - JvmBufferCountName = "jvm.buffer.count" - JvmBufferCountUnit = "{buffer}" - JvmBufferCountDescription = "Number of buffers in the pool." - - // JvmMemoryUsed is the metric conforming to the "jvm.memory.used" semantic - // conventions. It represents the measure of memory used. - // Instrument: updowncounter - // Unit: By - // Stability: Stable - JvmMemoryUsedName = "jvm.memory.used" - JvmMemoryUsedUnit = "By" - JvmMemoryUsedDescription = "Measure of memory used." - - // JvmMemoryCommitted is the metric conforming to the "jvm.memory.committed" - // semantic conventions. It represents the measure of memory committed. - // Instrument: updowncounter - // Unit: By - // Stability: Stable - JvmMemoryCommittedName = "jvm.memory.committed" - JvmMemoryCommittedUnit = "By" - JvmMemoryCommittedDescription = "Measure of memory committed." - - // JvmMemoryLimit is the metric conforming to the "jvm.memory.limit" semantic - // conventions. It represents the measure of max obtainable memory. 
- // Instrument: updowncounter - // Unit: By - // Stability: Stable - JvmMemoryLimitName = "jvm.memory.limit" - JvmMemoryLimitUnit = "By" - JvmMemoryLimitDescription = "Measure of max obtainable memory." - - // JvmMemoryUsedAfterLastGc is the metric conforming to the - // "jvm.memory.used_after_last_gc" semantic conventions. It represents the - // measure of memory used, as measured after the most recent garbage collection - // event on this pool. - // Instrument: updowncounter - // Unit: By - // Stability: Stable - JvmMemoryUsedAfterLastGcName = "jvm.memory.used_after_last_gc" - JvmMemoryUsedAfterLastGcUnit = "By" - JvmMemoryUsedAfterLastGcDescription = "Measure of memory used, as measured after the most recent garbage collection event on this pool." - - // JvmGcDuration is the metric conforming to the "jvm.gc.duration" semantic - // conventions. It represents the duration of JVM garbage collection actions. - // Instrument: histogram - // Unit: s - // Stability: Stable - JvmGcDurationName = "jvm.gc.duration" - JvmGcDurationUnit = "s" - JvmGcDurationDescription = "Duration of JVM garbage collection actions." - - // JvmThreadCount is the metric conforming to the "jvm.thread.count" semantic - // conventions. It represents the number of executing platform threads. - // Instrument: updowncounter - // Unit: {thread} - // Stability: Stable - JvmThreadCountName = "jvm.thread.count" - JvmThreadCountUnit = "{thread}" - JvmThreadCountDescription = "Number of executing platform threads." - - // JvmClassLoaded is the metric conforming to the "jvm.class.loaded" semantic - // conventions. It represents the number of classes loaded since JVM start. - // Instrument: counter - // Unit: {class} - // Stability: Stable - JvmClassLoadedName = "jvm.class.loaded" - JvmClassLoadedUnit = "{class}" - JvmClassLoadedDescription = "Number of classes loaded since JVM start." - - // JvmClassUnloaded is the metric conforming to the "jvm.class.unloaded" - // semantic conventions. 
It represents the number of classes unloaded since JVM - // start. - // Instrument: counter - // Unit: {class} - // Stability: Stable - JvmClassUnloadedName = "jvm.class.unloaded" - JvmClassUnloadedUnit = "{class}" - JvmClassUnloadedDescription = "Number of classes unloaded since JVM start." - - // JvmClassCount is the metric conforming to the "jvm.class.count" semantic - // conventions. It represents the number of classes currently loaded. - // Instrument: updowncounter - // Unit: {class} - // Stability: Stable - JvmClassCountName = "jvm.class.count" - JvmClassCountUnit = "{class}" - JvmClassCountDescription = "Number of classes currently loaded." - - // JvmCPUCount is the metric conforming to the "jvm.cpu.count" semantic - // conventions. It represents the number of processors available to the Java - // virtual machine. - // Instrument: updowncounter - // Unit: {cpu} - // Stability: Stable - JvmCPUCountName = "jvm.cpu.count" - JvmCPUCountUnit = "{cpu}" - JvmCPUCountDescription = "Number of processors available to the Java virtual machine." - - // JvmCPUTime is the metric conforming to the "jvm.cpu.time" semantic - // conventions. It represents the cPU time used by the process as reported by - // the JVM. - // Instrument: counter - // Unit: s - // Stability: Stable - JvmCPUTimeName = "jvm.cpu.time" - JvmCPUTimeUnit = "s" - JvmCPUTimeDescription = "CPU time used by the process as reported by the JVM." - - // JvmCPURecentUtilization is the metric conforming to the - // "jvm.cpu.recent_utilization" semantic conventions. It represents the recent - // CPU utilization for the process as reported by the JVM. - // Instrument: gauge - // Unit: 1 - // Stability: Stable - JvmCPURecentUtilizationName = "jvm.cpu.recent_utilization" - JvmCPURecentUtilizationUnit = "1" - JvmCPURecentUtilizationDescription = "Recent CPU utilization for the process as reported by the JVM." 
- - // MessagingPublishDuration is the metric conforming to the - // "messaging.publish.duration" semantic conventions. It represents the - // measures the duration of publish operation. - // Instrument: histogram - // Unit: s - // Stability: Experimental - MessagingPublishDurationName = "messaging.publish.duration" - MessagingPublishDurationUnit = "s" - MessagingPublishDurationDescription = "Measures the duration of publish operation." - - // MessagingReceiveDuration is the metric conforming to the - // "messaging.receive.duration" semantic conventions. It represents the - // measures the duration of receive operation. - // Instrument: histogram - // Unit: s - // Stability: Experimental - MessagingReceiveDurationName = "messaging.receive.duration" - MessagingReceiveDurationUnit = "s" - MessagingReceiveDurationDescription = "Measures the duration of receive operation." - - // MessagingProcessDuration is the metric conforming to the - // "messaging.process.duration" semantic conventions. It represents the - // measures the duration of process operation. - // Instrument: histogram - // Unit: s - // Stability: Experimental - MessagingProcessDurationName = "messaging.process.duration" - MessagingProcessDurationUnit = "s" - MessagingProcessDurationDescription = "Measures the duration of process operation." - - // MessagingPublishMessages is the metric conforming to the - // "messaging.publish.messages" semantic conventions. It represents the - // measures the number of published messages. - // Instrument: counter - // Unit: {message} - // Stability: Experimental - MessagingPublishMessagesName = "messaging.publish.messages" - MessagingPublishMessagesUnit = "{message}" - MessagingPublishMessagesDescription = "Measures the number of published messages." - - // MessagingReceiveMessages is the metric conforming to the - // "messaging.receive.messages" semantic conventions. It represents the - // measures the number of received messages. 
- // Instrument: counter - // Unit: {message} - // Stability: Experimental - MessagingReceiveMessagesName = "messaging.receive.messages" - MessagingReceiveMessagesUnit = "{message}" - MessagingReceiveMessagesDescription = "Measures the number of received messages." - - // MessagingProcessMessages is the metric conforming to the - // "messaging.process.messages" semantic conventions. It represents the - // measures the number of processed messages. - // Instrument: counter - // Unit: {message} - // Stability: Experimental - MessagingProcessMessagesName = "messaging.process.messages" - MessagingProcessMessagesUnit = "{message}" - MessagingProcessMessagesDescription = "Measures the number of processed messages." - - // ProcessCPUTime is the metric conforming to the "process.cpu.time" semantic - // conventions. It represents the total CPU seconds broken down by different - // states. - // Instrument: counter - // Unit: s - // Stability: Experimental - ProcessCPUTimeName = "process.cpu.time" - ProcessCPUTimeUnit = "s" - ProcessCPUTimeDescription = "Total CPU seconds broken down by different states." - - // ProcessCPUUtilization is the metric conforming to the - // "process.cpu.utilization" semantic conventions. It represents the difference - // in process.cpu.time since the last measurement, divided by the elapsed time - // and number of CPUs available to the process. - // Instrument: gauge - // Unit: 1 - // Stability: Experimental - ProcessCPUUtilizationName = "process.cpu.utilization" - ProcessCPUUtilizationUnit = "1" - ProcessCPUUtilizationDescription = "Difference in process.cpu.time since the last measurement, divided by the elapsed time and number of CPUs available to the process." - - // ProcessMemoryUsage is the metric conforming to the "process.memory.usage" - // semantic conventions. It represents the amount of physical memory in use. 
- // Instrument: updowncounter - // Unit: By - // Stability: Experimental - ProcessMemoryUsageName = "process.memory.usage" - ProcessMemoryUsageUnit = "By" - ProcessMemoryUsageDescription = "The amount of physical memory in use." - - // ProcessMemoryVirtual is the metric conforming to the - // "process.memory.virtual" semantic conventions. It represents the amount of - // committed virtual memory. - // Instrument: updowncounter - // Unit: By - // Stability: Experimental - ProcessMemoryVirtualName = "process.memory.virtual" - ProcessMemoryVirtualUnit = "By" - ProcessMemoryVirtualDescription = "The amount of committed virtual memory." - - // ProcessDiskIo is the metric conforming to the "process.disk.io" semantic - // conventions. It represents the disk bytes transferred. - // Instrument: counter - // Unit: By - // Stability: Experimental - ProcessDiskIoName = "process.disk.io" - ProcessDiskIoUnit = "By" - ProcessDiskIoDescription = "Disk bytes transferred." - - // ProcessNetworkIo is the metric conforming to the "process.network.io" - // semantic conventions. It represents the network bytes transferred. - // Instrument: counter - // Unit: By - // Stability: Experimental - ProcessNetworkIoName = "process.network.io" - ProcessNetworkIoUnit = "By" - ProcessNetworkIoDescription = "Network bytes transferred." - - // ProcessThreadCount is the metric conforming to the "process.thread.count" - // semantic conventions. It represents the process threads count. - // Instrument: updowncounter - // Unit: {thread} - // Stability: Experimental - ProcessThreadCountName = "process.thread.count" - ProcessThreadCountUnit = "{thread}" - ProcessThreadCountDescription = "Process threads count." - - // ProcessOpenFileDescriptorCount is the metric conforming to the - // "process.open_file_descriptor.count" semantic conventions. It represents the - // number of file descriptors in use by the process. 
- // Instrument: updowncounter - // Unit: {count} - // Stability: Experimental - ProcessOpenFileDescriptorCountName = "process.open_file_descriptor.count" - ProcessOpenFileDescriptorCountUnit = "{count}" - ProcessOpenFileDescriptorCountDescription = "Number of file descriptors in use by the process." - - // ProcessContextSwitches is the metric conforming to the - // "process.context_switches" semantic conventions. It represents the number of - // times the process has been context switched. - // Instrument: counter - // Unit: {count} - // Stability: Experimental - ProcessContextSwitchesName = "process.context_switches" - ProcessContextSwitchesUnit = "{count}" - ProcessContextSwitchesDescription = "Number of times the process has been context switched." - - // ProcessPagingFaults is the metric conforming to the "process.paging.faults" - // semantic conventions. It represents the number of page faults the process - // has made. - // Instrument: counter - // Unit: {fault} - // Stability: Experimental - ProcessPagingFaultsName = "process.paging.faults" - ProcessPagingFaultsUnit = "{fault}" - ProcessPagingFaultsDescription = "Number of page faults the process has made." - - // RPCServerDuration is the metric conforming to the "rpc.server.duration" - // semantic conventions. It represents the measures the duration of inbound - // RPC. - // Instrument: histogram - // Unit: ms - // Stability: Experimental - RPCServerDurationName = "rpc.server.duration" - RPCServerDurationUnit = "ms" - RPCServerDurationDescription = "Measures the duration of inbound RPC." - - // RPCServerRequestSize is the metric conforming to the - // "rpc.server.request.size" semantic conventions. It represents the measures - // the size of RPC request messages (uncompressed). 
- // Instrument: histogram - // Unit: By - // Stability: Experimental - RPCServerRequestSizeName = "rpc.server.request.size" - RPCServerRequestSizeUnit = "By" - RPCServerRequestSizeDescription = "Measures the size of RPC request messages (uncompressed)." - - // RPCServerResponseSize is the metric conforming to the - // "rpc.server.response.size" semantic conventions. It represents the measures - // the size of RPC response messages (uncompressed). - // Instrument: histogram - // Unit: By - // Stability: Experimental - RPCServerResponseSizeName = "rpc.server.response.size" - RPCServerResponseSizeUnit = "By" - RPCServerResponseSizeDescription = "Measures the size of RPC response messages (uncompressed)." - - // RPCServerRequestsPerRPC is the metric conforming to the - // "rpc.server.requests_per_rpc" semantic conventions. It represents the - // measures the number of messages received per RPC. - // Instrument: histogram - // Unit: {count} - // Stability: Experimental - RPCServerRequestsPerRPCName = "rpc.server.requests_per_rpc" - RPCServerRequestsPerRPCUnit = "{count}" - RPCServerRequestsPerRPCDescription = "Measures the number of messages received per RPC." - - // RPCServerResponsesPerRPC is the metric conforming to the - // "rpc.server.responses_per_rpc" semantic conventions. It represents the - // measures the number of messages sent per RPC. - // Instrument: histogram - // Unit: {count} - // Stability: Experimental - RPCServerResponsesPerRPCName = "rpc.server.responses_per_rpc" - RPCServerResponsesPerRPCUnit = "{count}" - RPCServerResponsesPerRPCDescription = "Measures the number of messages sent per RPC." - - // RPCClientDuration is the metric conforming to the "rpc.client.duration" - // semantic conventions. It represents the measures the duration of outbound - // RPC. 
- // Instrument: histogram - // Unit: ms - // Stability: Experimental - RPCClientDurationName = "rpc.client.duration" - RPCClientDurationUnit = "ms" - RPCClientDurationDescription = "Measures the duration of outbound RPC." - - // RPCClientRequestSize is the metric conforming to the - // "rpc.client.request.size" semantic conventions. It represents the measures - // the size of RPC request messages (uncompressed). - // Instrument: histogram - // Unit: By - // Stability: Experimental - RPCClientRequestSizeName = "rpc.client.request.size" - RPCClientRequestSizeUnit = "By" - RPCClientRequestSizeDescription = "Measures the size of RPC request messages (uncompressed)." - - // RPCClientResponseSize is the metric conforming to the - // "rpc.client.response.size" semantic conventions. It represents the measures - // the size of RPC response messages (uncompressed). - // Instrument: histogram - // Unit: By - // Stability: Experimental - RPCClientResponseSizeName = "rpc.client.response.size" - RPCClientResponseSizeUnit = "By" - RPCClientResponseSizeDescription = "Measures the size of RPC response messages (uncompressed)." - - // RPCClientRequestsPerRPC is the metric conforming to the - // "rpc.client.requests_per_rpc" semantic conventions. It represents the - // measures the number of messages received per RPC. - // Instrument: histogram - // Unit: {count} - // Stability: Experimental - RPCClientRequestsPerRPCName = "rpc.client.requests_per_rpc" - RPCClientRequestsPerRPCUnit = "{count}" - RPCClientRequestsPerRPCDescription = "Measures the number of messages received per RPC." - - // RPCClientResponsesPerRPC is the metric conforming to the - // "rpc.client.responses_per_rpc" semantic conventions. It represents the - // measures the number of messages sent per RPC. 
- // Instrument: histogram - // Unit: {count} - // Stability: Experimental - RPCClientResponsesPerRPCName = "rpc.client.responses_per_rpc" - RPCClientResponsesPerRPCUnit = "{count}" - RPCClientResponsesPerRPCDescription = "Measures the number of messages sent per RPC." - - // SystemCPUTime is the metric conforming to the "system.cpu.time" semantic - // conventions. It represents the seconds each logical CPU spent on each mode. - // Instrument: counter - // Unit: s - // Stability: Experimental - SystemCPUTimeName = "system.cpu.time" - SystemCPUTimeUnit = "s" - SystemCPUTimeDescription = "Seconds each logical CPU spent on each mode" - - // SystemCPUUtilization is the metric conforming to the - // "system.cpu.utilization" semantic conventions. It represents the difference - // in system.cpu.time since the last measurement, divided by the elapsed time - // and number of logical CPUs. - // Instrument: gauge - // Unit: 1 - // Stability: Experimental - SystemCPUUtilizationName = "system.cpu.utilization" - SystemCPUUtilizationUnit = "1" - SystemCPUUtilizationDescription = "Difference in system.cpu.time since the last measurement, divided by the elapsed time and number of logical CPUs" - - // SystemCPUFrequency is the metric conforming to the "system.cpu.frequency" - // semantic conventions. It represents the reports the current frequency of the - // CPU in Hz. - // Instrument: gauge - // Unit: {Hz} - // Stability: Experimental - SystemCPUFrequencyName = "system.cpu.frequency" - SystemCPUFrequencyUnit = "{Hz}" - SystemCPUFrequencyDescription = "Reports the current frequency of the CPU in Hz" - - // SystemCPUPhysicalCount is the metric conforming to the - // "system.cpu.physical.count" semantic conventions. It represents the reports - // the number of actual physical processor cores on the hardware. 
- // Instrument: updowncounter - // Unit: {cpu} - // Stability: Experimental - SystemCPUPhysicalCountName = "system.cpu.physical.count" - SystemCPUPhysicalCountUnit = "{cpu}" - SystemCPUPhysicalCountDescription = "Reports the number of actual physical processor cores on the hardware" - - // SystemCPULogicalCount is the metric conforming to the - // "system.cpu.logical.count" semantic conventions. It represents the reports - // the number of logical (virtual) processor cores created by the operating - // system to manage multitasking. - // Instrument: updowncounter - // Unit: {cpu} - // Stability: Experimental - SystemCPULogicalCountName = "system.cpu.logical.count" - SystemCPULogicalCountUnit = "{cpu}" - SystemCPULogicalCountDescription = "Reports the number of logical (virtual) processor cores created by the operating system to manage multitasking" - - // SystemMemoryUsage is the metric conforming to the "system.memory.usage" - // semantic conventions. It represents the reports memory in use by state. - // Instrument: updowncounter - // Unit: By - // Stability: Experimental - SystemMemoryUsageName = "system.memory.usage" - SystemMemoryUsageUnit = "By" - SystemMemoryUsageDescription = "Reports memory in use by state." - - // SystemMemoryLimit is the metric conforming to the "system.memory.limit" - // semantic conventions. It represents the total memory available in the - // system. - // Instrument: updowncounter - // Unit: By - // Stability: Experimental - SystemMemoryLimitName = "system.memory.limit" - SystemMemoryLimitUnit = "By" - SystemMemoryLimitDescription = "Total memory available in the system." - - // SystemMemoryShared is the metric conforming to the "system.memory.shared" - // semantic conventions. It represents the shared memory used (mostly by - // tmpfs). 
- // Instrument: updowncounter - // Unit: By - // Stability: Experimental - SystemMemorySharedName = "system.memory.shared" - SystemMemorySharedUnit = "By" - SystemMemorySharedDescription = "Shared memory used (mostly by tmpfs)." - - // SystemMemoryUtilization is the metric conforming to the - // "system.memory.utilization" semantic conventions. - // Instrument: gauge - // Unit: 1 - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemMemoryUtilizationName = "system.memory.utilization" - SystemMemoryUtilizationUnit = "1" - - // SystemPagingUsage is the metric conforming to the "system.paging.usage" - // semantic conventions. It represents the unix swap or windows pagefile usage. - // Instrument: updowncounter - // Unit: By - // Stability: Experimental - SystemPagingUsageName = "system.paging.usage" - SystemPagingUsageUnit = "By" - SystemPagingUsageDescription = "Unix swap or windows pagefile usage" - - // SystemPagingUtilization is the metric conforming to the - // "system.paging.utilization" semantic conventions. - // Instrument: gauge - // Unit: 1 - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemPagingUtilizationName = "system.paging.utilization" - SystemPagingUtilizationUnit = "1" - - // SystemPagingFaults is the metric conforming to the "system.paging.faults" - // semantic conventions. - // Instrument: counter - // Unit: {fault} - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemPagingFaultsName = "system.paging.faults" - SystemPagingFaultsUnit = "{fault}" - - // SystemPagingOperations is the metric conforming to the - // "system.paging.operations" semantic conventions. 
- // Instrument: counter - // Unit: {operation} - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemPagingOperationsName = "system.paging.operations" - SystemPagingOperationsUnit = "{operation}" - - // SystemDiskIo is the metric conforming to the "system.disk.io" semantic - // conventions. - // Instrument: counter - // Unit: By - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemDiskIoName = "system.disk.io" - SystemDiskIoUnit = "By" - - // SystemDiskOperations is the metric conforming to the - // "system.disk.operations" semantic conventions. - // Instrument: counter - // Unit: {operation} - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemDiskOperationsName = "system.disk.operations" - SystemDiskOperationsUnit = "{operation}" - - // SystemDiskIoTime is the metric conforming to the "system.disk.io_time" - // semantic conventions. It represents the time disk spent activated. - // Instrument: counter - // Unit: s - // Stability: Experimental - SystemDiskIoTimeName = "system.disk.io_time" - SystemDiskIoTimeUnit = "s" - SystemDiskIoTimeDescription = "Time disk spent activated" - - // SystemDiskOperationTime is the metric conforming to the - // "system.disk.operation_time" semantic conventions. It represents the sum of - // the time each operation took to complete. - // Instrument: counter - // Unit: s - // Stability: Experimental - SystemDiskOperationTimeName = "system.disk.operation_time" - SystemDiskOperationTimeUnit = "s" - SystemDiskOperationTimeDescription = "Sum of the time each operation took to complete" - - // SystemDiskMerged is the metric conforming to the "system.disk.merged" - // semantic conventions. 
- // Instrument: counter - // Unit: {operation} - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemDiskMergedName = "system.disk.merged" - SystemDiskMergedUnit = "{operation}" - - // SystemFilesystemUsage is the metric conforming to the - // "system.filesystem.usage" semantic conventions. - // Instrument: updowncounter - // Unit: By - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemFilesystemUsageName = "system.filesystem.usage" - SystemFilesystemUsageUnit = "By" - - // SystemFilesystemUtilization is the metric conforming to the - // "system.filesystem.utilization" semantic conventions. - // Instrument: gauge - // Unit: 1 - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemFilesystemUtilizationName = "system.filesystem.utilization" - SystemFilesystemUtilizationUnit = "1" - - // SystemNetworkDropped is the metric conforming to the - // "system.network.dropped" semantic conventions. It represents the count of - // packets that are dropped or discarded even though there was no error. - // Instrument: counter - // Unit: {packet} - // Stability: Experimental - SystemNetworkDroppedName = "system.network.dropped" - SystemNetworkDroppedUnit = "{packet}" - SystemNetworkDroppedDescription = "Count of packets that are dropped or discarded even though there was no error" - - // SystemNetworkPackets is the metric conforming to the - // "system.network.packets" semantic conventions. - // Instrument: counter - // Unit: {packet} - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. 
- SystemNetworkPacketsName = "system.network.packets" - SystemNetworkPacketsUnit = "{packet}" - - // SystemNetworkErrors is the metric conforming to the "system.network.errors" - // semantic conventions. It represents the count of network errors detected. - // Instrument: counter - // Unit: {error} - // Stability: Experimental - SystemNetworkErrorsName = "system.network.errors" - SystemNetworkErrorsUnit = "{error}" - SystemNetworkErrorsDescription = "Count of network errors detected" - - // SystemNetworkIo is the metric conforming to the "system.network.io" semantic - // conventions. - // Instrument: counter - // Unit: By - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemNetworkIoName = "system.network.io" - SystemNetworkIoUnit = "By" - - // SystemNetworkConnections is the metric conforming to the - // "system.network.connections" semantic conventions. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemNetworkConnectionsName = "system.network.connections" - SystemNetworkConnectionsUnit = "{connection}" - - // SystemProcessCount is the metric conforming to the "system.process.count" - // semantic conventions. It represents the total number of processes in each - // state. - // Instrument: updowncounter - // Unit: {process} - // Stability: Experimental - SystemProcessCountName = "system.process.count" - SystemProcessCountUnit = "{process}" - SystemProcessCountDescription = "Total number of processes in each state" - - // SystemProcessCreated is the metric conforming to the - // "system.process.created" semantic conventions. It represents the total - // number of processes created over uptime of the host. 
- // Instrument: counter - // Unit: {process} - // Stability: Experimental - SystemProcessCreatedName = "system.process.created" - SystemProcessCreatedUnit = "{process}" - SystemProcessCreatedDescription = "Total number of processes created over uptime of the host" - - // SystemLinuxMemoryAvailable is the metric conforming to the - // "system.linux.memory.available" semantic conventions. It represents an - // estimate of how much memory is available for starting new applications, - // without causing swapping. - // Instrument: updowncounter - // Unit: By - // Stability: Experimental - SystemLinuxMemoryAvailableName = "system.linux.memory.available" - SystemLinuxMemoryAvailableUnit = "By" - SystemLinuxMemoryAvailableDescription = "An estimate of how much memory is available for starting new applications, without causing swapping" -) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go deleted file mode 100644 index 4c87c7adcc7..00000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" - -// SchemaURL is the schema URL that matches the version of the semantic conventions -// that this package defines. 
Semconv packages starting from v1.4.0 must declare -// non-empty schema URL in the form https://opentelemetry.io/schemas/ -const SchemaURL = "https://opentelemetry.io/schemas/1.26.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/error_type.go b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/error_type.go index 666bded4baf..267979c051d 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/error_type.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/error_type.go @@ -4,28 +4,53 @@ package semconv // import "go.opentelemetry.io/otel/semconv/v1.37.0" import ( - "fmt" "reflect" "go.opentelemetry.io/otel/attribute" ) // ErrorType returns an [attribute.KeyValue] identifying the error type of err. +// +// If err is nil, the returned attribute has the default value +// [ErrorTypeOther]. +// +// If err's type has the method +// +// ErrorType() string +// +// then the returned attribute has the value of err.ErrorType(). Otherwise, the +// returned attribute has a value derived from the concrete type of err. +// +// The key of the returned attribute is [ErrorTypeKey]. func ErrorType(err error) attribute.KeyValue { if err == nil { return ErrorTypeOther } - t := reflect.TypeOf(err) - var value string - if t.PkgPath() == "" && t.Name() == "" { - // Likely a builtin type. - value = t.String() - } else { - value = fmt.Sprintf("%s.%s", t.PkgPath(), t.Name()) - } - if value == "" { - return ErrorTypeOther - } - return ErrorTypeKey.String(value) + return ErrorTypeKey.String(errorType(err)) +} + +func errorType(err error) string { + var s string + if et, ok := err.(interface{ ErrorType() string }); ok { + // Prioritize the ErrorType method if available. + s = et.ErrorType() + } + if s == "" { + // Fallback to reflection if the ErrorType method is not supported or + // returns an empty value. + + t := reflect.TypeOf(err) + pkg, name := t.PkgPath(), t.Name() + if pkg != "" && name != "" { + s = pkg + "." 
+ name + } else { + // The type has no package path or name (predeclared, not-defined, + // or alias for a not-defined type). + // + // This is not guaranteed to be unique, but is a best effort. + s = t.String() + } + } + return s } diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/httpconv/metric.go b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/httpconv/metric.go index 55bde895ddd..a0ddf652d34 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/httpconv/metric.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/httpconv/metric.go @@ -91,6 +91,11 @@ type ClientActiveRequests struct { metric.Int64UpDownCounter } +var newClientActiveRequestsOpts = []metric.Int64UpDownCounterOption{ + metric.WithDescription("Number of active HTTP requests."), + metric.WithUnit("{request}"), +} + // NewClientActiveRequests returns a new ClientActiveRequests instrument. func NewClientActiveRequests( m metric.Meter, @@ -101,15 +106,18 @@ func NewClientActiveRequests( return ClientActiveRequests{noop.Int64UpDownCounter{}}, nil } + if len(opt) == 0 { + opt = newClientActiveRequestsOpts + } else { + opt = append(opt, newClientActiveRequestsOpts...) + } + i, err := m.Int64UpDownCounter( "http.client.active_requests", - append([]metric.Int64UpDownCounterOption{ - metric.WithDescription("Number of active HTTP requests."), - metric.WithUnit("{request}"), - }, opt...)..., + opt..., ) if err != nil { - return ClientActiveRequests{noop.Int64UpDownCounter{}}, err + return ClientActiveRequests{noop.Int64UpDownCounter{}}, err } return ClientActiveRequests{i}, nil } @@ -223,6 +231,11 @@ type ClientConnectionDuration struct { metric.Float64Histogram } +var newClientConnectionDurationOpts = []metric.Float64HistogramOption{ + metric.WithDescription("The duration of the successfully established outbound HTTP connections."), + metric.WithUnit("s"), +} + // NewClientConnectionDuration returns a new ClientConnectionDuration instrument. 
func NewClientConnectionDuration( m metric.Meter, @@ -233,15 +246,18 @@ func NewClientConnectionDuration( return ClientConnectionDuration{noop.Float64Histogram{}}, nil } + if len(opt) == 0 { + opt = newClientConnectionDurationOpts + } else { + opt = append(opt, newClientConnectionDurationOpts...) + } + i, err := m.Float64Histogram( "http.client.connection.duration", - append([]metric.Float64HistogramOption{ - metric.WithDescription("The duration of the successfully established outbound HTTP connections."), - metric.WithUnit("s"), - }, opt...)..., + opt..., ) if err != nil { - return ClientConnectionDuration{noop.Float64Histogram{}}, err + return ClientConnectionDuration{noop.Float64Histogram{}}, err } return ClientConnectionDuration{i}, nil } @@ -310,6 +326,7 @@ func (m ClientConnectionDuration) Record( func (m ClientConnectionDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) { if set.Len() == 0 { m.Float64Histogram.Record(ctx, val) + return } o := recOptPool.Get().(*[]metric.RecordOption) @@ -353,6 +370,11 @@ type ClientOpenConnections struct { metric.Int64UpDownCounter } +var newClientOpenConnectionsOpts = []metric.Int64UpDownCounterOption{ + metric.WithDescription("Number of outbound HTTP connections that are currently active or idle on the client."), + metric.WithUnit("{connection}"), +} + // NewClientOpenConnections returns a new ClientOpenConnections instrument. func NewClientOpenConnections( m metric.Meter, @@ -363,15 +385,18 @@ func NewClientOpenConnections( return ClientOpenConnections{noop.Int64UpDownCounter{}}, nil } + if len(opt) == 0 { + opt = newClientOpenConnectionsOpts + } else { + opt = append(opt, newClientOpenConnectionsOpts...) 
+ } + i, err := m.Int64UpDownCounter( "http.client.open_connections", - append([]metric.Int64UpDownCounterOption{ - metric.WithDescription("Number of outbound HTTP connections that are currently active or idle on the client."), - metric.WithUnit("{connection}"), - }, opt...)..., + opt..., ) if err != nil { - return ClientOpenConnections{noop.Int64UpDownCounter{}}, err + return ClientOpenConnections{noop.Int64UpDownCounter{}}, err } return ClientOpenConnections{i}, nil } @@ -488,6 +513,11 @@ type ClientRequestBodySize struct { metric.Int64Histogram } +var newClientRequestBodySizeOpts = []metric.Int64HistogramOption{ + metric.WithDescription("Size of HTTP client request bodies."), + metric.WithUnit("By"), +} + // NewClientRequestBodySize returns a new ClientRequestBodySize instrument. func NewClientRequestBodySize( m metric.Meter, @@ -498,15 +528,18 @@ func NewClientRequestBodySize( return ClientRequestBodySize{noop.Int64Histogram{}}, nil } + if len(opt) == 0 { + opt = newClientRequestBodySizeOpts + } else { + opt = append(opt, newClientRequestBodySizeOpts...) 
+ } + i, err := m.Int64Histogram( "http.client.request.body.size", - append([]metric.Int64HistogramOption{ - metric.WithDescription("Size of HTTP client request bodies."), - metric.WithUnit("By"), - }, opt...)..., + opt..., ) if err != nil { - return ClientRequestBodySize{noop.Int64Histogram{}}, err + return ClientRequestBodySize{noop.Int64Histogram{}}, err } return ClientRequestBodySize{i}, nil } @@ -593,6 +626,7 @@ func (m ClientRequestBodySize) Record( func (m ClientRequestBodySize) RecordSet(ctx context.Context, val int64, set attribute.Set) { if set.Len() == 0 { m.Int64Histogram.Record(ctx, val) + return } o := recOptPool.Get().(*[]metric.RecordOption) @@ -662,6 +696,11 @@ type ClientRequestDuration struct { metric.Float64Histogram } +var newClientRequestDurationOpts = []metric.Float64HistogramOption{ + metric.WithDescription("Duration of HTTP client requests."), + metric.WithUnit("s"), +} + // NewClientRequestDuration returns a new ClientRequestDuration instrument. func NewClientRequestDuration( m metric.Meter, @@ -672,15 +711,18 @@ func NewClientRequestDuration( return ClientRequestDuration{noop.Float64Histogram{}}, nil } + if len(opt) == 0 { + opt = newClientRequestDurationOpts + } else { + opt = append(opt, newClientRequestDurationOpts...) 
+ } + i, err := m.Float64Histogram( "http.client.request.duration", - append([]metric.Float64HistogramOption{ - metric.WithDescription("Duration of HTTP client requests."), - metric.WithUnit("s"), - }, opt...)..., + opt..., ) if err != nil { - return ClientRequestDuration{noop.Float64Histogram{}}, err + return ClientRequestDuration{noop.Float64Histogram{}}, err } return ClientRequestDuration{i}, nil } @@ -753,6 +795,7 @@ func (m ClientRequestDuration) Record( func (m ClientRequestDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) { if set.Len() == 0 { m.Float64Histogram.Record(ctx, val) + return } o := recOptPool.Get().(*[]metric.RecordOption) @@ -822,6 +865,11 @@ type ClientResponseBodySize struct { metric.Int64Histogram } +var newClientResponseBodySizeOpts = []metric.Int64HistogramOption{ + metric.WithDescription("Size of HTTP client response bodies."), + metric.WithUnit("By"), +} + // NewClientResponseBodySize returns a new ClientResponseBodySize instrument. func NewClientResponseBodySize( m metric.Meter, @@ -832,15 +880,18 @@ func NewClientResponseBodySize( return ClientResponseBodySize{noop.Int64Histogram{}}, nil } + if len(opt) == 0 { + opt = newClientResponseBodySizeOpts + } else { + opt = append(opt, newClientResponseBodySizeOpts...) 
+ } + i, err := m.Int64Histogram( "http.client.response.body.size", - append([]metric.Int64HistogramOption{ - metric.WithDescription("Size of HTTP client response bodies."), - metric.WithUnit("By"), - }, opt...)..., + opt..., ) if err != nil { - return ClientResponseBodySize{noop.Int64Histogram{}}, err + return ClientResponseBodySize{noop.Int64Histogram{}}, err } return ClientResponseBodySize{i}, nil } @@ -927,6 +978,7 @@ func (m ClientResponseBodySize) Record( func (m ClientResponseBodySize) RecordSet(ctx context.Context, val int64, set attribute.Set) { if set.Len() == 0 { m.Int64Histogram.Record(ctx, val) + return } o := recOptPool.Get().(*[]metric.RecordOption) @@ -996,6 +1048,11 @@ type ServerActiveRequests struct { metric.Int64UpDownCounter } +var newServerActiveRequestsOpts = []metric.Int64UpDownCounterOption{ + metric.WithDescription("Number of active HTTP server requests."), + metric.WithUnit("{request}"), +} + // NewServerActiveRequests returns a new ServerActiveRequests instrument. func NewServerActiveRequests( m metric.Meter, @@ -1006,15 +1063,18 @@ func NewServerActiveRequests( return ServerActiveRequests{noop.Int64UpDownCounter{}}, nil } + if len(opt) == 0 { + opt = newServerActiveRequestsOpts + } else { + opt = append(opt, newServerActiveRequestsOpts...) 
+ } + i, err := m.Int64UpDownCounter( "http.server.active_requests", - append([]metric.Int64UpDownCounterOption{ - metric.WithDescription("Number of active HTTP server requests."), - metric.WithUnit("{request}"), - }, opt...)..., + opt..., ) if err != nil { - return ServerActiveRequests{noop.Int64UpDownCounter{}}, err + return ServerActiveRequests{noop.Int64UpDownCounter{}}, err } return ServerActiveRequests{i}, nil } @@ -1118,6 +1178,11 @@ type ServerRequestBodySize struct { metric.Int64Histogram } +var newServerRequestBodySizeOpts = []metric.Int64HistogramOption{ + metric.WithDescription("Size of HTTP server request bodies."), + metric.WithUnit("By"), +} + // NewServerRequestBodySize returns a new ServerRequestBodySize instrument. func NewServerRequestBodySize( m metric.Meter, @@ -1128,15 +1193,18 @@ func NewServerRequestBodySize( return ServerRequestBodySize{noop.Int64Histogram{}}, nil } + if len(opt) == 0 { + opt = newServerRequestBodySizeOpts + } else { + opt = append(opt, newServerRequestBodySizeOpts...) 
+ } + i, err := m.Int64Histogram( "http.server.request.body.size", - append([]metric.Int64HistogramOption{ - metric.WithDescription("Size of HTTP server request bodies."), - metric.WithUnit("By"), - }, opt...)..., + opt..., ) if err != nil { - return ServerRequestBodySize{noop.Int64Histogram{}}, err + return ServerRequestBodySize{noop.Int64Histogram{}}, err } return ServerRequestBodySize{i}, nil } @@ -1220,6 +1288,7 @@ func (m ServerRequestBodySize) Record( func (m ServerRequestBodySize) RecordSet(ctx context.Context, val int64, set attribute.Set) { if set.Len() == 0 { m.Int64Histogram.Record(ctx, val) + return } o := recOptPool.Get().(*[]metric.RecordOption) @@ -1299,6 +1368,11 @@ type ServerRequestDuration struct { metric.Float64Histogram } +var newServerRequestDurationOpts = []metric.Float64HistogramOption{ + metric.WithDescription("Duration of HTTP server requests."), + metric.WithUnit("s"), +} + // NewServerRequestDuration returns a new ServerRequestDuration instrument. func NewServerRequestDuration( m metric.Meter, @@ -1309,15 +1383,18 @@ func NewServerRequestDuration( return ServerRequestDuration{noop.Float64Histogram{}}, nil } + if len(opt) == 0 { + opt = newServerRequestDurationOpts + } else { + opt = append(opt, newServerRequestDurationOpts...) 
+ } + i, err := m.Float64Histogram( "http.server.request.duration", - append([]metric.Float64HistogramOption{ - metric.WithDescription("Duration of HTTP server requests."), - metric.WithUnit("s"), - }, opt...)..., + opt..., ) if err != nil { - return ServerRequestDuration{noop.Float64Histogram{}}, err + return ServerRequestDuration{noop.Float64Histogram{}}, err } return ServerRequestDuration{i}, nil } @@ -1387,6 +1464,7 @@ func (m ServerRequestDuration) Record( func (m ServerRequestDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) { if set.Len() == 0 { m.Float64Histogram.Record(ctx, val) + return } o := recOptPool.Get().(*[]metric.RecordOption) @@ -1466,6 +1544,11 @@ type ServerResponseBodySize struct { metric.Int64Histogram } +var newServerResponseBodySizeOpts = []metric.Int64HistogramOption{ + metric.WithDescription("Size of HTTP server response bodies."), + metric.WithUnit("By"), +} + // NewServerResponseBodySize returns a new ServerResponseBodySize instrument. func NewServerResponseBodySize( m metric.Meter, @@ -1476,15 +1559,18 @@ func NewServerResponseBodySize( return ServerResponseBodySize{noop.Int64Histogram{}}, nil } + if len(opt) == 0 { + opt = newServerResponseBodySizeOpts + } else { + opt = append(opt, newServerResponseBodySizeOpts...) 
+ } + i, err := m.Int64Histogram( "http.server.response.body.size", - append([]metric.Int64HistogramOption{ - metric.WithDescription("Size of HTTP server response bodies."), - metric.WithUnit("By"), - }, opt...)..., + opt..., ) if err != nil { - return ServerResponseBodySize{noop.Int64Histogram{}}, err + return ServerResponseBodySize{noop.Int64Histogram{}}, err } return ServerResponseBodySize{i}, nil } @@ -1568,6 +1654,7 @@ func (m ServerResponseBodySize) Record( func (m ServerResponseBodySize) RecordSet(ctx context.Context, val int64, set attribute.Set) { if set.Len() == 0 { m.Int64Histogram.Record(ctx, val) + return } o := recOptPool.Get().(*[]metric.RecordOption) @@ -1638,4 +1725,4 @@ func (ServerResponseBodySize) AttrServerPort(val int) attribute.KeyValue { // the category of synthetic traffic, such as tests or bots. func (ServerResponseBodySize) AttrUserAgentSyntheticType(val UserAgentSyntheticTypeAttr) attribute.KeyValue { return attribute.String("user_agent.synthetic.type", string(val)) -} \ No newline at end of file +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/otelconv/metric.go b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/otelconv/metric.go index a78eafd1fa3..fd064530c34 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/otelconv/metric.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/otelconv/metric.go @@ -3,7 +3,7 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -// Package httpconv provides types and functionality for OpenTelemetry semantic +// Package otelconv provides types and functionality for OpenTelemetry semantic // conventions in the "otel" namespace. 
package otelconv @@ -172,6 +172,11 @@ type SDKExporterLogExported struct { metric.Int64Counter } +var newSDKExporterLogExportedOpts = []metric.Int64CounterOption{ + metric.WithDescription("The number of log records for which the export has finished, either successful or failed."), + metric.WithUnit("{log_record}"), +} + // NewSDKExporterLogExported returns a new SDKExporterLogExported instrument. func NewSDKExporterLogExported( m metric.Meter, @@ -182,15 +187,18 @@ func NewSDKExporterLogExported( return SDKExporterLogExported{noop.Int64Counter{}}, nil } + if len(opt) == 0 { + opt = newSDKExporterLogExportedOpts + } else { + opt = append(opt, newSDKExporterLogExportedOpts...) + } + i, err := m.Int64Counter( "otel.sdk.exporter.log.exported", - append([]metric.Int64CounterOption{ - metric.WithDescription("The number of log records for which the export has finished, either successful or failed."), - metric.WithUnit("{log_record}"), - }, opt...)..., + opt..., ) if err != nil { - return SDKExporterLogExported{noop.Int64Counter{}}, err + return SDKExporterLogExported{noop.Int64Counter{}}, err } return SDKExporterLogExported{i}, nil } @@ -319,6 +327,11 @@ type SDKExporterLogInflight struct { metric.Int64UpDownCounter } +var newSDKExporterLogInflightOpts = []metric.Int64UpDownCounterOption{ + metric.WithDescription("The number of log records which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)."), + metric.WithUnit("{log_record}"), +} + // NewSDKExporterLogInflight returns a new SDKExporterLogInflight instrument. func NewSDKExporterLogInflight( m metric.Meter, @@ -329,15 +342,18 @@ func NewSDKExporterLogInflight( return SDKExporterLogInflight{noop.Int64UpDownCounter{}}, nil } + if len(opt) == 0 { + opt = newSDKExporterLogInflightOpts + } else { + opt = append(opt, newSDKExporterLogInflightOpts...) 
+ } + i, err := m.Int64UpDownCounter( "otel.sdk.exporter.log.inflight", - append([]metric.Int64UpDownCounterOption{ - metric.WithDescription("The number of log records which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)."), - metric.WithUnit("{log_record}"), - }, opt...)..., + opt..., ) if err != nil { - return SDKExporterLogInflight{noop.Int64UpDownCounter{}}, err + return SDKExporterLogInflight{noop.Int64UpDownCounter{}}, err } return SDKExporterLogInflight{i}, nil } @@ -449,6 +465,11 @@ type SDKExporterMetricDataPointExported struct { metric.Int64Counter } +var newSDKExporterMetricDataPointExportedOpts = []metric.Int64CounterOption{ + metric.WithDescription("The number of metric data points for which the export has finished, either successful or failed."), + metric.WithUnit("{data_point}"), +} + // NewSDKExporterMetricDataPointExported returns a new // SDKExporterMetricDataPointExported instrument. func NewSDKExporterMetricDataPointExported( @@ -460,15 +481,18 @@ func NewSDKExporterMetricDataPointExported( return SDKExporterMetricDataPointExported{noop.Int64Counter{}}, nil } + if len(opt) == 0 { + opt = newSDKExporterMetricDataPointExportedOpts + } else { + opt = append(opt, newSDKExporterMetricDataPointExportedOpts...) 
+ } + i, err := m.Int64Counter( "otel.sdk.exporter.metric_data_point.exported", - append([]metric.Int64CounterOption{ - metric.WithDescription("The number of metric data points for which the export has finished, either successful or failed."), - metric.WithUnit("{data_point}"), - }, opt...)..., + opt..., ) if err != nil { - return SDKExporterMetricDataPointExported{noop.Int64Counter{}}, err + return SDKExporterMetricDataPointExported{noop.Int64Counter{}}, err } return SDKExporterMetricDataPointExported{i}, nil } @@ -598,6 +622,11 @@ type SDKExporterMetricDataPointInflight struct { metric.Int64UpDownCounter } +var newSDKExporterMetricDataPointInflightOpts = []metric.Int64UpDownCounterOption{ + metric.WithDescription("The number of metric data points which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)."), + metric.WithUnit("{data_point}"), +} + // NewSDKExporterMetricDataPointInflight returns a new // SDKExporterMetricDataPointInflight instrument. func NewSDKExporterMetricDataPointInflight( @@ -609,15 +638,18 @@ func NewSDKExporterMetricDataPointInflight( return SDKExporterMetricDataPointInflight{noop.Int64UpDownCounter{}}, nil } + if len(opt) == 0 { + opt = newSDKExporterMetricDataPointInflightOpts + } else { + opt = append(opt, newSDKExporterMetricDataPointInflightOpts...) 
+ } + i, err := m.Int64UpDownCounter( "otel.sdk.exporter.metric_data_point.inflight", - append([]metric.Int64UpDownCounterOption{ - metric.WithDescription("The number of metric data points which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)."), - metric.WithUnit("{data_point}"), - }, opt...)..., + opt..., ) if err != nil { - return SDKExporterMetricDataPointInflight{noop.Int64UpDownCounter{}}, err + return SDKExporterMetricDataPointInflight{noop.Int64UpDownCounter{}}, err } return SDKExporterMetricDataPointInflight{i}, nil } @@ -728,6 +760,11 @@ type SDKExporterOperationDuration struct { metric.Float64Histogram } +var newSDKExporterOperationDurationOpts = []metric.Float64HistogramOption{ + metric.WithDescription("The duration of exporting a batch of telemetry records."), + metric.WithUnit("s"), +} + // NewSDKExporterOperationDuration returns a new SDKExporterOperationDuration // instrument. func NewSDKExporterOperationDuration( @@ -739,15 +776,18 @@ func NewSDKExporterOperationDuration( return SDKExporterOperationDuration{noop.Float64Histogram{}}, nil } + if len(opt) == 0 { + opt = newSDKExporterOperationDurationOpts + } else { + opt = append(opt, newSDKExporterOperationDurationOpts...) 
+ } + i, err := m.Float64Histogram( "otel.sdk.exporter.operation.duration", - append([]metric.Float64HistogramOption{ - metric.WithDescription("The duration of exporting a batch of telemetry records."), - metric.WithUnit("s"), - }, opt...)..., + opt..., ) if err != nil { - return SDKExporterOperationDuration{noop.Float64Histogram{}}, err + return SDKExporterOperationDuration{noop.Float64Histogram{}}, err } return SDKExporterOperationDuration{i}, nil } @@ -825,6 +865,7 @@ func (m SDKExporterOperationDuration) Record( func (m SDKExporterOperationDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) { if set.Len() == 0 { m.Float64Histogram.Record(ctx, val) + return } o := recOptPool.Get().(*[]metric.RecordOption) @@ -893,6 +934,11 @@ type SDKExporterSpanExported struct { metric.Int64Counter } +var newSDKExporterSpanExportedOpts = []metric.Int64CounterOption{ + metric.WithDescription("The number of spans for which the export has finished, either successful or failed."), + metric.WithUnit("{span}"), +} + // NewSDKExporterSpanExported returns a new SDKExporterSpanExported instrument. func NewSDKExporterSpanExported( m metric.Meter, @@ -903,15 +949,18 @@ func NewSDKExporterSpanExported( return SDKExporterSpanExported{noop.Int64Counter{}}, nil } + if len(opt) == 0 { + opt = newSDKExporterSpanExportedOpts + } else { + opt = append(opt, newSDKExporterSpanExportedOpts...) 
+ } + i, err := m.Int64Counter( "otel.sdk.exporter.span.exported", - append([]metric.Int64CounterOption{ - metric.WithDescription("The number of spans for which the export has finished, either successful or failed."), - metric.WithUnit("{span}"), - }, opt...)..., + opt..., ) if err != nil { - return SDKExporterSpanExported{noop.Int64Counter{}}, err + return SDKExporterSpanExported{noop.Int64Counter{}}, err } return SDKExporterSpanExported{i}, nil } @@ -1040,6 +1089,11 @@ type SDKExporterSpanInflight struct { metric.Int64UpDownCounter } +var newSDKExporterSpanInflightOpts = []metric.Int64UpDownCounterOption{ + metric.WithDescription("The number of spans which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)."), + metric.WithUnit("{span}"), +} + // NewSDKExporterSpanInflight returns a new SDKExporterSpanInflight instrument. func NewSDKExporterSpanInflight( m metric.Meter, @@ -1050,15 +1104,18 @@ func NewSDKExporterSpanInflight( return SDKExporterSpanInflight{noop.Int64UpDownCounter{}}, nil } + if len(opt) == 0 { + opt = newSDKExporterSpanInflightOpts + } else { + opt = append(opt, newSDKExporterSpanInflightOpts...) 
+ } + i, err := m.Int64UpDownCounter( "otel.sdk.exporter.span.inflight", - append([]metric.Int64UpDownCounterOption{ - metric.WithDescription("The number of spans which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)."), - metric.WithUnit("{span}"), - }, opt...)..., + opt..., ) if err != nil { - return SDKExporterSpanInflight{noop.Int64UpDownCounter{}}, err + return SDKExporterSpanInflight{noop.Int64UpDownCounter{}}, err } return SDKExporterSpanInflight{i}, nil } @@ -1169,6 +1226,11 @@ type SDKLogCreated struct { metric.Int64Counter } +var newSDKLogCreatedOpts = []metric.Int64CounterOption{ + metric.WithDescription("The number of logs submitted to enabled SDK Loggers."), + metric.WithUnit("{log_record}"), +} + // NewSDKLogCreated returns a new SDKLogCreated instrument. func NewSDKLogCreated( m metric.Meter, @@ -1179,15 +1241,18 @@ func NewSDKLogCreated( return SDKLogCreated{noop.Int64Counter{}}, nil } + if len(opt) == 0 { + opt = newSDKLogCreatedOpts + } else { + opt = append(opt, newSDKLogCreatedOpts...) + } + i, err := m.Int64Counter( "otel.sdk.log.created", - append([]metric.Int64CounterOption{ - metric.WithDescription("The number of logs submitted to enabled SDK Loggers."), - metric.WithUnit("{log_record}"), - }, opt...)..., + opt..., ) if err != nil { - return SDKLogCreated{noop.Int64Counter{}}, err + return SDKLogCreated{noop.Int64Counter{}}, err } return SDKLogCreated{i}, nil } @@ -1254,6 +1319,11 @@ type SDKMetricReaderCollectionDuration struct { metric.Float64Histogram } +var newSDKMetricReaderCollectionDurationOpts = []metric.Float64HistogramOption{ + metric.WithDescription("The duration of the collect operation of the metric reader."), + metric.WithUnit("s"), +} + // NewSDKMetricReaderCollectionDuration returns a new // SDKMetricReaderCollectionDuration instrument. 
func NewSDKMetricReaderCollectionDuration( @@ -1265,15 +1335,18 @@ func NewSDKMetricReaderCollectionDuration( return SDKMetricReaderCollectionDuration{noop.Float64Histogram{}}, nil } + if len(opt) == 0 { + opt = newSDKMetricReaderCollectionDurationOpts + } else { + opt = append(opt, newSDKMetricReaderCollectionDurationOpts...) + } + i, err := m.Float64Histogram( "otel.sdk.metric_reader.collection.duration", - append([]metric.Float64HistogramOption{ - metric.WithDescription("The duration of the collect operation of the metric reader."), - metric.WithUnit("s"), - }, opt...)..., + opt..., ) if err != nil { - return SDKMetricReaderCollectionDuration{noop.Float64Histogram{}}, err + return SDKMetricReaderCollectionDuration{noop.Float64Histogram{}}, err } return SDKMetricReaderCollectionDuration{i}, nil } @@ -1343,6 +1416,7 @@ func (m SDKMetricReaderCollectionDuration) Record( func (m SDKMetricReaderCollectionDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) { if set.Len() == 0 { m.Float64Histogram.Record(ctx, val) + return } o := recOptPool.Get().(*[]metric.RecordOption) @@ -1384,6 +1458,11 @@ type SDKProcessorLogProcessed struct { metric.Int64Counter } +var newSDKProcessorLogProcessedOpts = []metric.Int64CounterOption{ + metric.WithDescription("The number of log records for which the processing has finished, either successful or failed."), + metric.WithUnit("{log_record}"), +} + // NewSDKProcessorLogProcessed returns a new SDKProcessorLogProcessed instrument. func NewSDKProcessorLogProcessed( m metric.Meter, @@ -1394,15 +1473,18 @@ func NewSDKProcessorLogProcessed( return SDKProcessorLogProcessed{noop.Int64Counter{}}, nil } + if len(opt) == 0 { + opt = newSDKProcessorLogProcessedOpts + } else { + opt = append(opt, newSDKProcessorLogProcessedOpts...) 
+ } + i, err := m.Int64Counter( "otel.sdk.processor.log.processed", - append([]metric.Int64CounterOption{ - metric.WithDescription("The number of log records for which the processing has finished, either successful or failed."), - metric.WithUnit("{log_record}"), - }, opt...)..., + opt..., ) if err != nil { - return SDKProcessorLogProcessed{noop.Int64Counter{}}, err + return SDKProcessorLogProcessed{noop.Int64Counter{}}, err } return SDKProcessorLogProcessed{i}, nil } @@ -1515,6 +1597,11 @@ type SDKProcessorLogQueueCapacity struct { metric.Int64ObservableUpDownCounter } +var newSDKProcessorLogQueueCapacityOpts = []metric.Int64ObservableUpDownCounterOption{ + metric.WithDescription("The maximum number of log records the queue of a given instance of an SDK Log Record processor can hold."), + metric.WithUnit("{log_record}"), +} + // NewSDKProcessorLogQueueCapacity returns a new SDKProcessorLogQueueCapacity // instrument. func NewSDKProcessorLogQueueCapacity( @@ -1526,15 +1613,18 @@ func NewSDKProcessorLogQueueCapacity( return SDKProcessorLogQueueCapacity{noop.Int64ObservableUpDownCounter{}}, nil } + if len(opt) == 0 { + opt = newSDKProcessorLogQueueCapacityOpts + } else { + opt = append(opt, newSDKProcessorLogQueueCapacityOpts...) 
+ } + i, err := m.Int64ObservableUpDownCounter( "otel.sdk.processor.log.queue.capacity", - append([]metric.Int64ObservableUpDownCounterOption{ - metric.WithDescription("The maximum number of log records the queue of a given instance of an SDK Log Record processor can hold."), - metric.WithUnit("{log_record}"), - }, opt...)..., + opt..., ) if err != nil { - return SDKProcessorLogQueueCapacity{noop.Int64ObservableUpDownCounter{}}, err + return SDKProcessorLogQueueCapacity{noop.Int64ObservableUpDownCounter{}}, err } return SDKProcessorLogQueueCapacity{i}, nil } @@ -1581,6 +1671,11 @@ type SDKProcessorLogQueueSize struct { metric.Int64ObservableUpDownCounter } +var newSDKProcessorLogQueueSizeOpts = []metric.Int64ObservableUpDownCounterOption{ + metric.WithDescription("The number of log records in the queue of a given instance of an SDK log processor."), + metric.WithUnit("{log_record}"), +} + // NewSDKProcessorLogQueueSize returns a new SDKProcessorLogQueueSize instrument. func NewSDKProcessorLogQueueSize( m metric.Meter, @@ -1591,15 +1686,18 @@ func NewSDKProcessorLogQueueSize( return SDKProcessorLogQueueSize{noop.Int64ObservableUpDownCounter{}}, nil } + if len(opt) == 0 { + opt = newSDKProcessorLogQueueSizeOpts + } else { + opt = append(opt, newSDKProcessorLogQueueSizeOpts...) 
+ } + i, err := m.Int64ObservableUpDownCounter( "otel.sdk.processor.log.queue.size", - append([]metric.Int64ObservableUpDownCounterOption{ - metric.WithDescription("The number of log records in the queue of a given instance of an SDK log processor."), - metric.WithUnit("{log_record}"), - }, opt...)..., + opt..., ) if err != nil { - return SDKProcessorLogQueueSize{noop.Int64ObservableUpDownCounter{}}, err + return SDKProcessorLogQueueSize{noop.Int64ObservableUpDownCounter{}}, err } return SDKProcessorLogQueueSize{i}, nil } @@ -1646,6 +1744,11 @@ type SDKProcessorSpanProcessed struct { metric.Int64Counter } +var newSDKProcessorSpanProcessedOpts = []metric.Int64CounterOption{ + metric.WithDescription("The number of spans for which the processing has finished, either successful or failed."), + metric.WithUnit("{span}"), +} + // NewSDKProcessorSpanProcessed returns a new SDKProcessorSpanProcessed // instrument. func NewSDKProcessorSpanProcessed( @@ -1657,15 +1760,18 @@ func NewSDKProcessorSpanProcessed( return SDKProcessorSpanProcessed{noop.Int64Counter{}}, nil } + if len(opt) == 0 { + opt = newSDKProcessorSpanProcessedOpts + } else { + opt = append(opt, newSDKProcessorSpanProcessedOpts...) 
+ } + i, err := m.Int64Counter( "otel.sdk.processor.span.processed", - append([]metric.Int64CounterOption{ - metric.WithDescription("The number of spans for which the processing has finished, either successful or failed."), - metric.WithUnit("{span}"), - }, opt...)..., + opt..., ) if err != nil { - return SDKProcessorSpanProcessed{noop.Int64Counter{}}, err + return SDKProcessorSpanProcessed{noop.Int64Counter{}}, err } return SDKProcessorSpanProcessed{i}, nil } @@ -1778,6 +1884,11 @@ type SDKProcessorSpanQueueCapacity struct { metric.Int64ObservableUpDownCounter } +var newSDKProcessorSpanQueueCapacityOpts = []metric.Int64ObservableUpDownCounterOption{ + metric.WithDescription("The maximum number of spans the queue of a given instance of an SDK span processor can hold."), + metric.WithUnit("{span}"), +} + // NewSDKProcessorSpanQueueCapacity returns a new SDKProcessorSpanQueueCapacity // instrument. func NewSDKProcessorSpanQueueCapacity( @@ -1789,15 +1900,18 @@ func NewSDKProcessorSpanQueueCapacity( return SDKProcessorSpanQueueCapacity{noop.Int64ObservableUpDownCounter{}}, nil } + if len(opt) == 0 { + opt = newSDKProcessorSpanQueueCapacityOpts + } else { + opt = append(opt, newSDKProcessorSpanQueueCapacityOpts...) 
+ } + i, err := m.Int64ObservableUpDownCounter( "otel.sdk.processor.span.queue.capacity", - append([]metric.Int64ObservableUpDownCounterOption{ - metric.WithDescription("The maximum number of spans the queue of a given instance of an SDK span processor can hold."), - metric.WithUnit("{span}"), - }, opt...)..., + opt..., ) if err != nil { - return SDKProcessorSpanQueueCapacity{noop.Int64ObservableUpDownCounter{}}, err + return SDKProcessorSpanQueueCapacity{noop.Int64ObservableUpDownCounter{}}, err } return SDKProcessorSpanQueueCapacity{i}, nil } @@ -1844,6 +1958,11 @@ type SDKProcessorSpanQueueSize struct { metric.Int64ObservableUpDownCounter } +var newSDKProcessorSpanQueueSizeOpts = []metric.Int64ObservableUpDownCounterOption{ + metric.WithDescription("The number of spans in the queue of a given instance of an SDK span processor."), + metric.WithUnit("{span}"), +} + // NewSDKProcessorSpanQueueSize returns a new SDKProcessorSpanQueueSize // instrument. func NewSDKProcessorSpanQueueSize( @@ -1855,15 +1974,18 @@ func NewSDKProcessorSpanQueueSize( return SDKProcessorSpanQueueSize{noop.Int64ObservableUpDownCounter{}}, nil } + if len(opt) == 0 { + opt = newSDKProcessorSpanQueueSizeOpts + } else { + opt = append(opt, newSDKProcessorSpanQueueSizeOpts...) 
+ } + i, err := m.Int64ObservableUpDownCounter( "otel.sdk.processor.span.queue.size", - append([]metric.Int64ObservableUpDownCounterOption{ - metric.WithDescription("The number of spans in the queue of a given instance of an SDK span processor."), - metric.WithUnit("{span}"), - }, opt...)..., + opt..., ) if err != nil { - return SDKProcessorSpanQueueSize{noop.Int64ObservableUpDownCounter{}}, err + return SDKProcessorSpanQueueSize{noop.Int64ObservableUpDownCounter{}}, err } return SDKProcessorSpanQueueSize{i}, nil } @@ -1910,6 +2032,11 @@ type SDKSpanLive struct { metric.Int64UpDownCounter } +var newSDKSpanLiveOpts = []metric.Int64UpDownCounterOption{ + metric.WithDescription("The number of created spans with `recording=true` for which the end operation has not been called yet."), + metric.WithUnit("{span}"), +} + // NewSDKSpanLive returns a new SDKSpanLive instrument. func NewSDKSpanLive( m metric.Meter, @@ -1920,15 +2047,18 @@ func NewSDKSpanLive( return SDKSpanLive{noop.Int64UpDownCounter{}}, nil } + if len(opt) == 0 { + opt = newSDKSpanLiveOpts + } else { + opt = append(opt, newSDKSpanLiveOpts...) + } + i, err := m.Int64UpDownCounter( "otel.sdk.span.live", - append([]metric.Int64UpDownCounterOption{ - metric.WithDescription("The number of created spans with `recording=true` for which the end operation has not been called yet."), - metric.WithUnit("{span}"), - }, opt...)..., + opt..., ) if err != nil { - return SDKSpanLive{noop.Int64UpDownCounter{}}, err + return SDKSpanLive{noop.Int64UpDownCounter{}}, err } return SDKSpanLive{i}, nil } @@ -2013,6 +2143,11 @@ type SDKSpanStarted struct { metric.Int64Counter } +var newSDKSpanStartedOpts = []metric.Int64CounterOption{ + metric.WithDescription("The number of created spans."), + metric.WithUnit("{span}"), +} + // NewSDKSpanStarted returns a new SDKSpanStarted instrument. 
func NewSDKSpanStarted( m metric.Meter, @@ -2023,15 +2158,18 @@ func NewSDKSpanStarted( return SDKSpanStarted{noop.Int64Counter{}}, nil } + if len(opt) == 0 { + opt = newSDKSpanStartedOpts + } else { + opt = append(opt, newSDKSpanStartedOpts...) + } + i, err := m.Int64Counter( "otel.sdk.span.started", - append([]metric.Int64CounterOption{ - metric.WithDescription("The number of created spans."), - metric.WithUnit("{span}"), - }, opt...)..., + opt..., ) if err != nil { - return SDKSpanStarted{noop.Int64Counter{}}, err + return SDKSpanStarted{noop.Int64Counter{}}, err } return SDKSpanStarted{i}, nil } @@ -2123,4 +2261,4 @@ func (SDKSpanStarted) AttrSpanParentOrigin(val SpanParentOriginAttr) attribute.K // value of the sampler for this span. func (SDKSpanStarted) AttrSpanSamplingResult(val SpanSamplingResultAttr) attribute.KeyValue { return attribute.String("otel.span.sampling_result", string(val)) -} \ No newline at end of file +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/rpcconv/metric.go b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/rpcconv/metric.go index 146b7eda62c..089b0c457fc 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/rpcconv/metric.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/rpcconv/metric.go @@ -3,7 +3,7 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -// Package httpconv provides types and functionality for OpenTelemetry semantic +// Package rpcconv provides types and functionality for OpenTelemetry semantic // conventions in the "rpc" namespace. package rpcconv @@ -28,6 +28,11 @@ type ClientDuration struct { metric.Float64Histogram } +var newClientDurationOpts = []metric.Float64HistogramOption{ + metric.WithDescription("Measures the duration of outbound RPC."), + metric.WithUnit("ms"), +} + // NewClientDuration returns a new ClientDuration instrument. 
func NewClientDuration( m metric.Meter, @@ -38,15 +43,18 @@ func NewClientDuration( return ClientDuration{noop.Float64Histogram{}}, nil } + if len(opt) == 0 { + opt = newClientDurationOpts + } else { + opt = append(opt, newClientDurationOpts...) + } + i, err := m.Float64Histogram( "rpc.client.duration", - append([]metric.Float64HistogramOption{ - metric.WithDescription("Measures the duration of outbound RPC."), - metric.WithUnit("ms"), - }, opt...)..., + opt..., ) if err != nil { - return ClientDuration{noop.Float64Histogram{}}, err + return ClientDuration{noop.Float64Histogram{}}, err } return ClientDuration{i}, nil } @@ -102,6 +110,7 @@ func (m ClientDuration) Record(ctx context.Context, val float64, attrs ...attrib func (m ClientDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) { if set.Len() == 0 { m.Float64Histogram.Record(ctx, val) + return } o := recOptPool.Get().(*[]metric.RecordOption) @@ -121,6 +130,11 @@ type ClientRequestSize struct { metric.Int64Histogram } +var newClientRequestSizeOpts = []metric.Int64HistogramOption{ + metric.WithDescription("Measures the size of RPC request messages (uncompressed)."), + metric.WithUnit("By"), +} + // NewClientRequestSize returns a new ClientRequestSize instrument. func NewClientRequestSize( m metric.Meter, @@ -131,15 +145,18 @@ func NewClientRequestSize( return ClientRequestSize{noop.Int64Histogram{}}, nil } + if len(opt) == 0 { + opt = newClientRequestSizeOpts + } else { + opt = append(opt, newClientRequestSizeOpts...) 
+ } + i, err := m.Int64Histogram( "rpc.client.request.size", - append([]metric.Int64HistogramOption{ - metric.WithDescription("Measures the size of RPC request messages (uncompressed)."), - metric.WithUnit("By"), - }, opt...)..., + opt..., ) if err != nil { - return ClientRequestSize{noop.Int64Histogram{}}, err + return ClientRequestSize{noop.Int64Histogram{}}, err } return ClientRequestSize{i}, nil } @@ -189,6 +206,7 @@ func (m ClientRequestSize) Record(ctx context.Context, val int64, attrs ...attri func (m ClientRequestSize) RecordSet(ctx context.Context, val int64, set attribute.Set) { if set.Len() == 0 { m.Int64Histogram.Record(ctx, val) + return } o := recOptPool.Get().(*[]metric.RecordOption) @@ -208,6 +226,11 @@ type ClientRequestsPerRPC struct { metric.Int64Histogram } +var newClientRequestsPerRPCOpts = []metric.Int64HistogramOption{ + metric.WithDescription("Measures the number of messages received per RPC."), + metric.WithUnit("{count}"), +} + // NewClientRequestsPerRPC returns a new ClientRequestsPerRPC instrument. func NewClientRequestsPerRPC( m metric.Meter, @@ -218,15 +241,18 @@ func NewClientRequestsPerRPC( return ClientRequestsPerRPC{noop.Int64Histogram{}}, nil } + if len(opt) == 0 { + opt = newClientRequestsPerRPCOpts + } else { + opt = append(opt, newClientRequestsPerRPCOpts...) 
+ } + i, err := m.Int64Histogram( "rpc.client.requests_per_rpc", - append([]metric.Int64HistogramOption{ - metric.WithDescription("Measures the number of messages received per RPC."), - metric.WithUnit("{count}"), - }, opt...)..., + opt..., ) if err != nil { - return ClientRequestsPerRPC{noop.Int64Histogram{}}, err + return ClientRequestsPerRPC{noop.Int64Histogram{}}, err } return ClientRequestsPerRPC{i}, nil } @@ -280,6 +306,7 @@ func (m ClientRequestsPerRPC) Record(ctx context.Context, val int64, attrs ...at func (m ClientRequestsPerRPC) RecordSet(ctx context.Context, val int64, set attribute.Set) { if set.Len() == 0 { m.Int64Histogram.Record(ctx, val) + return } o := recOptPool.Get().(*[]metric.RecordOption) @@ -299,6 +326,11 @@ type ClientResponseSize struct { metric.Int64Histogram } +var newClientResponseSizeOpts = []metric.Int64HistogramOption{ + metric.WithDescription("Measures the size of RPC response messages (uncompressed)."), + metric.WithUnit("By"), +} + // NewClientResponseSize returns a new ClientResponseSize instrument. func NewClientResponseSize( m metric.Meter, @@ -309,15 +341,18 @@ func NewClientResponseSize( return ClientResponseSize{noop.Int64Histogram{}}, nil } + if len(opt) == 0 { + opt = newClientResponseSizeOpts + } else { + opt = append(opt, newClientResponseSizeOpts...) 
+ } + i, err := m.Int64Histogram( "rpc.client.response.size", - append([]metric.Int64HistogramOption{ - metric.WithDescription("Measures the size of RPC response messages (uncompressed)."), - metric.WithUnit("By"), - }, opt...)..., + opt..., ) if err != nil { - return ClientResponseSize{noop.Int64Histogram{}}, err + return ClientResponseSize{noop.Int64Histogram{}}, err } return ClientResponseSize{i}, nil } @@ -367,6 +402,7 @@ func (m ClientResponseSize) Record(ctx context.Context, val int64, attrs ...attr func (m ClientResponseSize) RecordSet(ctx context.Context, val int64, set attribute.Set) { if set.Len() == 0 { m.Int64Histogram.Record(ctx, val) + return } o := recOptPool.Get().(*[]metric.RecordOption) @@ -386,6 +422,11 @@ type ClientResponsesPerRPC struct { metric.Int64Histogram } +var newClientResponsesPerRPCOpts = []metric.Int64HistogramOption{ + metric.WithDescription("Measures the number of messages sent per RPC."), + metric.WithUnit("{count}"), +} + // NewClientResponsesPerRPC returns a new ClientResponsesPerRPC instrument. func NewClientResponsesPerRPC( m metric.Meter, @@ -396,15 +437,18 @@ func NewClientResponsesPerRPC( return ClientResponsesPerRPC{noop.Int64Histogram{}}, nil } + if len(opt) == 0 { + opt = newClientResponsesPerRPCOpts + } else { + opt = append(opt, newClientResponsesPerRPCOpts...) 
+ } + i, err := m.Int64Histogram( "rpc.client.responses_per_rpc", - append([]metric.Int64HistogramOption{ - metric.WithDescription("Measures the number of messages sent per RPC."), - metric.WithUnit("{count}"), - }, opt...)..., + opt..., ) if err != nil { - return ClientResponsesPerRPC{noop.Int64Histogram{}}, err + return ClientResponsesPerRPC{noop.Int64Histogram{}}, err } return ClientResponsesPerRPC{i}, nil } @@ -458,6 +502,7 @@ func (m ClientResponsesPerRPC) Record(ctx context.Context, val int64, attrs ...a func (m ClientResponsesPerRPC) RecordSet(ctx context.Context, val int64, set attribute.Set) { if set.Len() == 0 { m.Int64Histogram.Record(ctx, val) + return } o := recOptPool.Get().(*[]metric.RecordOption) @@ -477,6 +522,11 @@ type ServerDuration struct { metric.Float64Histogram } +var newServerDurationOpts = []metric.Float64HistogramOption{ + metric.WithDescription("Measures the duration of inbound RPC."), + metric.WithUnit("ms"), +} + // NewServerDuration returns a new ServerDuration instrument. func NewServerDuration( m metric.Meter, @@ -487,15 +537,18 @@ func NewServerDuration( return ServerDuration{noop.Float64Histogram{}}, nil } + if len(opt) == 0 { + opt = newServerDurationOpts + } else { + opt = append(opt, newServerDurationOpts...) 
+ } + i, err := m.Float64Histogram( "rpc.server.duration", - append([]metric.Float64HistogramOption{ - metric.WithDescription("Measures the duration of inbound RPC."), - metric.WithUnit("ms"), - }, opt...)..., + opt..., ) if err != nil { - return ServerDuration{noop.Float64Histogram{}}, err + return ServerDuration{noop.Float64Histogram{}}, err } return ServerDuration{i}, nil } @@ -551,6 +604,7 @@ func (m ServerDuration) Record(ctx context.Context, val float64, attrs ...attrib func (m ServerDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) { if set.Len() == 0 { m.Float64Histogram.Record(ctx, val) + return } o := recOptPool.Get().(*[]metric.RecordOption) @@ -570,6 +624,11 @@ type ServerRequestSize struct { metric.Int64Histogram } +var newServerRequestSizeOpts = []metric.Int64HistogramOption{ + metric.WithDescription("Measures the size of RPC request messages (uncompressed)."), + metric.WithUnit("By"), +} + // NewServerRequestSize returns a new ServerRequestSize instrument. func NewServerRequestSize( m metric.Meter, @@ -580,15 +639,18 @@ func NewServerRequestSize( return ServerRequestSize{noop.Int64Histogram{}}, nil } + if len(opt) == 0 { + opt = newServerRequestSizeOpts + } else { + opt = append(opt, newServerRequestSizeOpts...) 
+ } + i, err := m.Int64Histogram( "rpc.server.request.size", - append([]metric.Int64HistogramOption{ - metric.WithDescription("Measures the size of RPC request messages (uncompressed)."), - metric.WithUnit("By"), - }, opt...)..., + opt..., ) if err != nil { - return ServerRequestSize{noop.Int64Histogram{}}, err + return ServerRequestSize{noop.Int64Histogram{}}, err } return ServerRequestSize{i}, nil } @@ -638,6 +700,7 @@ func (m ServerRequestSize) Record(ctx context.Context, val int64, attrs ...attri func (m ServerRequestSize) RecordSet(ctx context.Context, val int64, set attribute.Set) { if set.Len() == 0 { m.Int64Histogram.Record(ctx, val) + return } o := recOptPool.Get().(*[]metric.RecordOption) @@ -657,6 +720,11 @@ type ServerRequestsPerRPC struct { metric.Int64Histogram } +var newServerRequestsPerRPCOpts = []metric.Int64HistogramOption{ + metric.WithDescription("Measures the number of messages received per RPC."), + metric.WithUnit("{count}"), +} + // NewServerRequestsPerRPC returns a new ServerRequestsPerRPC instrument. func NewServerRequestsPerRPC( m metric.Meter, @@ -667,15 +735,18 @@ func NewServerRequestsPerRPC( return ServerRequestsPerRPC{noop.Int64Histogram{}}, nil } + if len(opt) == 0 { + opt = newServerRequestsPerRPCOpts + } else { + opt = append(opt, newServerRequestsPerRPCOpts...) 
+ } + i, err := m.Int64Histogram( "rpc.server.requests_per_rpc", - append([]metric.Int64HistogramOption{ - metric.WithDescription("Measures the number of messages received per RPC."), - metric.WithUnit("{count}"), - }, opt...)..., + opt..., ) if err != nil { - return ServerRequestsPerRPC{noop.Int64Histogram{}}, err + return ServerRequestsPerRPC{noop.Int64Histogram{}}, err } return ServerRequestsPerRPC{i}, nil } @@ -729,6 +800,7 @@ func (m ServerRequestsPerRPC) Record(ctx context.Context, val int64, attrs ...at func (m ServerRequestsPerRPC) RecordSet(ctx context.Context, val int64, set attribute.Set) { if set.Len() == 0 { m.Int64Histogram.Record(ctx, val) + return } o := recOptPool.Get().(*[]metric.RecordOption) @@ -748,6 +820,11 @@ type ServerResponseSize struct { metric.Int64Histogram } +var newServerResponseSizeOpts = []metric.Int64HistogramOption{ + metric.WithDescription("Measures the size of RPC response messages (uncompressed)."), + metric.WithUnit("By"), +} + // NewServerResponseSize returns a new ServerResponseSize instrument. func NewServerResponseSize( m metric.Meter, @@ -758,15 +835,18 @@ func NewServerResponseSize( return ServerResponseSize{noop.Int64Histogram{}}, nil } + if len(opt) == 0 { + opt = newServerResponseSizeOpts + } else { + opt = append(opt, newServerResponseSizeOpts...) 
+ } + i, err := m.Int64Histogram( "rpc.server.response.size", - append([]metric.Int64HistogramOption{ - metric.WithDescription("Measures the size of RPC response messages (uncompressed)."), - metric.WithUnit("By"), - }, opt...)..., + opt..., ) if err != nil { - return ServerResponseSize{noop.Int64Histogram{}}, err + return ServerResponseSize{noop.Int64Histogram{}}, err } return ServerResponseSize{i}, nil } @@ -816,6 +896,7 @@ func (m ServerResponseSize) Record(ctx context.Context, val int64, attrs ...attr func (m ServerResponseSize) RecordSet(ctx context.Context, val int64, set attribute.Set) { if set.Len() == 0 { m.Int64Histogram.Record(ctx, val) + return } o := recOptPool.Get().(*[]metric.RecordOption) @@ -835,6 +916,11 @@ type ServerResponsesPerRPC struct { metric.Int64Histogram } +var newServerResponsesPerRPCOpts = []metric.Int64HistogramOption{ + metric.WithDescription("Measures the number of messages sent per RPC."), + metric.WithUnit("{count}"), +} + // NewServerResponsesPerRPC returns a new ServerResponsesPerRPC instrument. func NewServerResponsesPerRPC( m metric.Meter, @@ -845,15 +931,18 @@ func NewServerResponsesPerRPC( return ServerResponsesPerRPC{noop.Int64Histogram{}}, nil } + if len(opt) == 0 { + opt = newServerResponsesPerRPCOpts + } else { + opt = append(opt, newServerResponsesPerRPCOpts...) 
+ } + i, err := m.Int64Histogram( "rpc.server.responses_per_rpc", - append([]metric.Int64HistogramOption{ - metric.WithDescription("Measures the number of messages sent per RPC."), - metric.WithUnit("{count}"), - }, opt...)..., + opt..., ) if err != nil { - return ServerResponsesPerRPC{noop.Int64Histogram{}}, err + return ServerResponsesPerRPC{noop.Int64Histogram{}}, err } return ServerResponsesPerRPC{i}, nil } @@ -907,6 +996,7 @@ func (m ServerResponsesPerRPC) Record(ctx context.Context, val int64, attrs ...a func (m ServerResponsesPerRPC) RecordSet(ctx context.Context, val int64, set attribute.Set) { if set.Len() == 0 { m.Int64Histogram.Record(ctx, val) + return } o := recOptPool.Get().(*[]metric.RecordOption) @@ -917,4 +1007,4 @@ func (m ServerResponsesPerRPC) RecordSet(ctx context.Context, val int64, set att *o = append(*o, metric.WithAttributeSet(set)) m.Int64Histogram.Record(ctx, val, *o...) -} \ No newline at end of file +} diff --git a/vendor/go.opentelemetry.io/otel/trace/config.go b/vendor/go.opentelemetry.io/otel/trace/config.go index aea11a2b52c..d9ecef1cad2 100644 --- a/vendor/go.opentelemetry.io/otel/trace/config.go +++ b/vendor/go.opentelemetry.io/otel/trace/config.go @@ -4,6 +4,7 @@ package trace // import "go.opentelemetry.io/otel/trace" import ( + "slices" "time" "go.opentelemetry.io/otel/attribute" @@ -304,12 +305,50 @@ func WithInstrumentationVersion(version string) TracerOption { }) } -// WithInstrumentationAttributes sets the instrumentation attributes. +// mergeSets returns the union of keys between a and b. Any duplicate keys will +// use the value associated with b. +func mergeSets(a, b attribute.Set) attribute.Set { + // NewMergeIterator uses the first value for any duplicates. + iter := attribute.NewMergeIterator(&b, &a) + merged := make([]attribute.KeyValue, 0, a.Len()+b.Len()) + for iter.Next() { + merged = append(merged, iter.Attribute()) + } + return attribute.NewSet(merged...) 
+} + +// WithInstrumentationAttributes adds the instrumentation attributes. // -// The passed attributes will be de-duplicated. +// This is equivalent to calling [WithInstrumentationAttributeSet] with an +// [attribute.Set] created from a clone of the passed attributes. +// [WithInstrumentationAttributeSet] is recommended for more control. +// +// If multiple [WithInstrumentationAttributes] or [WithInstrumentationAttributeSet] +// options are passed, the attributes will be merged together in the order +// they are passed. Attributes with duplicate keys will use the last value passed. func WithInstrumentationAttributes(attr ...attribute.KeyValue) TracerOption { + set := attribute.NewSet(slices.Clone(attr)...) + return WithInstrumentationAttributeSet(set) +} + +// WithInstrumentationAttributeSet adds the instrumentation attributes. +// +// If multiple [WithInstrumentationAttributes] or [WithInstrumentationAttributeSet] +// options are passed, the attributes will be merged together in the order +// they are passed. Attributes with duplicate keys will use the last value passed. +func WithInstrumentationAttributeSet(set attribute.Set) TracerOption { + if set.Len() == 0 { + return tracerOptionFunc(func(config TracerConfig) TracerConfig { + return config + }) + } + return tracerOptionFunc(func(config TracerConfig) TracerConfig { - config.attrs = attribute.NewSet(attr...) + if config.attrs.Len() == 0 { + config.attrs = set + } else { + config.attrs = mergeSets(config.attrs, set) + } return config }) } diff --git a/vendor/go.opentelemetry.io/otel/trace/span.go b/vendor/go.opentelemetry.io/otel/trace/span.go index d3aa476ee12..d01e7936649 100644 --- a/vendor/go.opentelemetry.io/otel/trace/span.go +++ b/vendor/go.opentelemetry.io/otel/trace/span.go @@ -66,6 +66,10 @@ type Span interface { // SetAttributes sets kv as attributes of the Span. If a key from kv // already exists for an attribute of the Span it will be overwritten with // the value contained in kv. 
+ // + // Note that adding attributes at span creation using [WithAttributes] is preferred + // to calling SetAttribute later, as samplers can only consider information + // already present during span creation. SetAttributes(kv ...attribute.KeyValue) // TracerProvider returns a TracerProvider that can be used to generate diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go index bcaa5aa5378..0d5b0291873 100644 --- a/vendor/go.opentelemetry.io/otel/version.go +++ b/vendor/go.opentelemetry.io/otel/version.go @@ -5,5 +5,5 @@ package otel // import "go.opentelemetry.io/otel" // Version is the current release version of OpenTelemetry in use. func Version() string { - return "1.38.0" + return "1.39.0" } diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml index 07145e254b5..f4a3893eb5a 100644 --- a/vendor/go.opentelemetry.io/otel/versions.yaml +++ b/vendor/go.opentelemetry.io/otel/versions.yaml @@ -3,7 +3,7 @@ module-sets: stable-v1: - version: v1.38.0 + version: v1.39.0 modules: - go.opentelemetry.io/otel - go.opentelemetry.io/otel/bridge/opencensus @@ -22,11 +22,11 @@ module-sets: - go.opentelemetry.io/otel/sdk/metric - go.opentelemetry.io/otel/trace experimental-metrics: - version: v0.60.0 + version: v0.61.0 modules: - go.opentelemetry.io/otel/exporters/prometheus experimental-logs: - version: v0.14.0 + version: v0.15.0 modules: - go.opentelemetry.io/otel/log - go.opentelemetry.io/otel/log/logtest @@ -36,9 +36,28 @@ module-sets: - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp - go.opentelemetry.io/otel/exporters/stdout/stdoutlog experimental-schema: - version: v0.0.13 + version: v0.0.14 modules: - go.opentelemetry.io/otel/schema excluded-modules: - go.opentelemetry.io/otel/internal/tools - go.opentelemetry.io/otel/trace/internal/telemetry/test +modules: + go.opentelemetry.io/otel/exporters/stdout/stdouttrace: + version-refs: + - ./internal/version.go + 
go.opentelemetry.io/otel/exporters/prometheus: + version-refs: + - ./internal/version.go + go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc: + version-refs: + - ./internal/version.go + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc: + version-refs: + - ./internal/version.go + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp: + version-refs: + - ./internal/version.go + go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp: + version-refs: + - ./internal/version.go diff --git a/vendor/golang.org/x/mod/module/module.go b/vendor/golang.org/x/mod/module/module.go index 16e1aa7ab47..9d3955bd733 100644 --- a/vendor/golang.org/x/mod/module/module.go +++ b/vendor/golang.org/x/mod/module/module.go @@ -261,7 +261,7 @@ func modPathOK(r rune) bool { // importPathOK reports whether r can appear in a package import path element. // -// Import paths are intermediate between module paths and file paths: we allow +// Import paths are intermediate between module paths and file paths: we // disallow characters that would be confusing or ambiguous as arguments to // 'go get' (such as '@' and ' ' ), but allow certain characters that are // otherwise-unambiguous on the command line and historically used for some diff --git a/vendor/golang.org/x/mod/semver/semver.go b/vendor/golang.org/x/mod/semver/semver.go index 628f8fd687c..824b282c830 100644 --- a/vendor/golang.org/x/mod/semver/semver.go +++ b/vendor/golang.org/x/mod/semver/semver.go @@ -45,8 +45,8 @@ func IsValid(v string) bool { // Canonical returns the canonical formatting of the semantic version v. // It fills in any missing .MINOR or .PATCH and discards build metadata. -// Two semantic versions compare equal only if their canonical formattings -// are identical strings. +// Two semantic versions compare equal only if their canonical formatting +// is an identical string. // The canonical invalid semantic version is the empty string. 
func Canonical(v string) string { p, ok := parse(v) diff --git a/vendor/golang.org/x/sync/errgroup/errgroup.go b/vendor/golang.org/x/sync/errgroup/errgroup.go index 2f45dbc86e5..f69fd754685 100644 --- a/vendor/golang.org/x/sync/errgroup/errgroup.go +++ b/vendor/golang.org/x/sync/errgroup/errgroup.go @@ -144,8 +144,8 @@ func (g *Group) SetLimit(n int) { g.sem = nil return } - if len(g.sem) != 0 { - panic(fmt.Errorf("errgroup: modify limit while %v goroutines in the group are still active", len(g.sem))) + if active := len(g.sem); active != 0 { + panic(fmt.Errorf("errgroup: modify limit while %v goroutines in the group are still active", active)) } g.sem = make(chan token, n) } diff --git a/vendor/golang.org/x/text/encoding/japanese/eucjp.go b/vendor/golang.org/x/text/encoding/japanese/eucjp.go index 79313fa589a..6fce8c5f528 100644 --- a/vendor/golang.org/x/text/encoding/japanese/eucjp.go +++ b/vendor/golang.org/x/text/encoding/japanese/eucjp.go @@ -17,9 +17,9 @@ import ( var EUCJP encoding.Encoding = &eucJP var eucJP = internal.Encoding{ - &internal.SimpleEncoding{eucJPDecoder{}, eucJPEncoder{}}, - "EUC-JP", - identifier.EUCPkdFmtJapanese, + Encoding: &internal.SimpleEncoding{Decoder: eucJPDecoder{}, Encoder: eucJPEncoder{}}, + Name: "EUC-JP", + MIB: identifier.EUCPkdFmtJapanese, } type eucJPDecoder struct{ transform.NopResetter } diff --git a/vendor/golang.org/x/text/encoding/japanese/iso2022jp.go b/vendor/golang.org/x/text/encoding/japanese/iso2022jp.go index 613226df5e9..6f7bd460a6c 100644 --- a/vendor/golang.org/x/text/encoding/japanese/iso2022jp.go +++ b/vendor/golang.org/x/text/encoding/japanese/iso2022jp.go @@ -17,9 +17,9 @@ import ( var ISO2022JP encoding.Encoding = &iso2022JP var iso2022JP = internal.Encoding{ - internal.FuncEncoding{iso2022JPNewDecoder, iso2022JPNewEncoder}, - "ISO-2022-JP", - identifier.ISO2022JP, + Encoding: internal.FuncEncoding{Decoder: iso2022JPNewDecoder, Encoder: iso2022JPNewEncoder}, + Name: "ISO-2022-JP", + MIB: 
identifier.ISO2022JP, } func iso2022JPNewDecoder() transform.Transformer { diff --git a/vendor/golang.org/x/text/encoding/japanese/shiftjis.go b/vendor/golang.org/x/text/encoding/japanese/shiftjis.go index 16fd8a6e3ea..af65d43d95e 100644 --- a/vendor/golang.org/x/text/encoding/japanese/shiftjis.go +++ b/vendor/golang.org/x/text/encoding/japanese/shiftjis.go @@ -18,9 +18,9 @@ import ( var ShiftJIS encoding.Encoding = &shiftJIS var shiftJIS = internal.Encoding{ - &internal.SimpleEncoding{shiftJISDecoder{}, shiftJISEncoder{}}, - "Shift JIS", - identifier.ShiftJIS, + Encoding: &internal.SimpleEncoding{Decoder: shiftJISDecoder{}, Encoder: shiftJISEncoder{}}, + Name: "Shift JIS", + MIB: identifier.ShiftJIS, } type shiftJISDecoder struct{ transform.NopResetter } diff --git a/vendor/golang.org/x/text/encoding/korean/euckr.go b/vendor/golang.org/x/text/encoding/korean/euckr.go index 034337f5df5..81c834730c4 100644 --- a/vendor/golang.org/x/text/encoding/korean/euckr.go +++ b/vendor/golang.org/x/text/encoding/korean/euckr.go @@ -20,9 +20,9 @@ var All = []encoding.Encoding{EUCKR} var EUCKR encoding.Encoding = &eucKR var eucKR = internal.Encoding{ - &internal.SimpleEncoding{eucKRDecoder{}, eucKREncoder{}}, - "EUC-KR", - identifier.EUCKR, + Encoding: &internal.SimpleEncoding{Decoder: eucKRDecoder{}, Encoder: eucKREncoder{}}, + Name: "EUC-KR", + MIB: identifier.EUCKR, } type eucKRDecoder struct{ transform.NopResetter } diff --git a/vendor/golang.org/x/text/encoding/simplifiedchinese/gbk.go b/vendor/golang.org/x/text/encoding/simplifiedchinese/gbk.go index 0e0fabfd6b1..2f2fd5d4498 100644 --- a/vendor/golang.org/x/text/encoding/simplifiedchinese/gbk.go +++ b/vendor/golang.org/x/text/encoding/simplifiedchinese/gbk.go @@ -22,21 +22,21 @@ var ( ) var gbk = internal.Encoding{ - &internal.SimpleEncoding{ - gbkDecoder{gb18030: false}, - gbkEncoder{gb18030: false}, + Encoding: &internal.SimpleEncoding{ + Decoder: gbkDecoder{gb18030: false}, + Encoder: gbkEncoder{gb18030: false}, }, - 
"GBK", - identifier.GBK, + Name: "GBK", + MIB: identifier.GBK, } var gbk18030 = internal.Encoding{ - &internal.SimpleEncoding{ - gbkDecoder{gb18030: true}, - gbkEncoder{gb18030: true}, + Encoding: &internal.SimpleEncoding{ + Decoder: gbkDecoder{gb18030: true}, + Encoder: gbkEncoder{gb18030: true}, }, - "GB18030", - identifier.GB18030, + Name: "GB18030", + MIB: identifier.GB18030, } type gbkDecoder struct { diff --git a/vendor/golang.org/x/text/encoding/simplifiedchinese/hzgb2312.go b/vendor/golang.org/x/text/encoding/simplifiedchinese/hzgb2312.go index e15b7bf6a7c..351750e60e0 100644 --- a/vendor/golang.org/x/text/encoding/simplifiedchinese/hzgb2312.go +++ b/vendor/golang.org/x/text/encoding/simplifiedchinese/hzgb2312.go @@ -17,9 +17,9 @@ import ( var HZGB2312 encoding.Encoding = &hzGB2312 var hzGB2312 = internal.Encoding{ - internal.FuncEncoding{hzGB2312NewDecoder, hzGB2312NewEncoder}, - "HZ-GB2312", - identifier.HZGB2312, + Encoding: internal.FuncEncoding{Decoder: hzGB2312NewDecoder, Encoder: hzGB2312NewEncoder}, + Name: "HZ-GB2312", + MIB: identifier.HZGB2312, } func hzGB2312NewDecoder() transform.Transformer { diff --git a/vendor/golang.org/x/text/encoding/traditionalchinese/big5.go b/vendor/golang.org/x/text/encoding/traditionalchinese/big5.go index 1fcddde0829..5046920ee03 100644 --- a/vendor/golang.org/x/text/encoding/traditionalchinese/big5.go +++ b/vendor/golang.org/x/text/encoding/traditionalchinese/big5.go @@ -20,9 +20,9 @@ var All = []encoding.Encoding{Big5} var Big5 encoding.Encoding = &big5 var big5 = internal.Encoding{ - &internal.SimpleEncoding{big5Decoder{}, big5Encoder{}}, - "Big5", - identifier.Big5, + Encoding: &internal.SimpleEncoding{Decoder: big5Decoder{}, Encoder: big5Encoder{}}, + Name: "Big5", + MIB: identifier.Big5, } type big5Decoder struct{ transform.NopResetter } diff --git a/vendor/golang.org/x/text/encoding/unicode/unicode.go b/vendor/golang.org/x/text/encoding/unicode/unicode.go index dd99ad14d37..ce28c906288 100644 --- 
a/vendor/golang.org/x/text/encoding/unicode/unicode.go +++ b/vendor/golang.org/x/text/encoding/unicode/unicode.go @@ -60,9 +60,9 @@ func (utf8bomEncoding) NewDecoder() *encoding.Decoder { } var utf8enc = &internal.Encoding{ - &internal.SimpleEncoding{utf8Decoder{}, runes.ReplaceIllFormed()}, - "UTF-8", - identifier.UTF8, + Encoding: &internal.SimpleEncoding{Decoder: utf8Decoder{}, Encoder: runes.ReplaceIllFormed()}, + Name: "UTF-8", + MIB: identifier.UTF8, } type utf8bomDecoder struct { diff --git a/vendor/golang.org/x/tools/go/ast/inspector/cursor.go b/vendor/golang.org/x/tools/go/ast/inspector/cursor.go index 7e72d3c284b..fc9bbc714c6 100644 --- a/vendor/golang.org/x/tools/go/ast/inspector/cursor.go +++ b/vendor/golang.org/x/tools/go/ast/inspector/cursor.go @@ -467,7 +467,9 @@ func (c Cursor) FindByPos(start, end token.Pos) (Cursor, bool) { // This algorithm could be implemented using c.Inspect, // but it is about 2.5x slower. - best := int32(-1) // push index of latest (=innermost) node containing range + // best is the push-index of the latest (=innermost) node containing range. + // (Beware: latest is not always innermost because FuncDecl.{Name,Type} overlap.) + best := int32(-1) for i, limit := c.indices(); i < limit; i++ { ev := events[i] if ev.index > i { // push? @@ -481,6 +483,19 @@ func (c Cursor) FindByPos(start, end token.Pos) (Cursor, bool) { continue } } else { + // Edge case: FuncDecl.Name and .Type overlap: + // Don't update best from Name to FuncDecl.Type. 
+ // + // The condition can be read as: + // - n is FuncType + // - n.parent is FuncDecl + // - best is strictly beneath the FuncDecl + if ev.typ == 1< ev.parent { + continue + } + nodeEnd = n.End() if n.Pos() > start { break // disjoint, after; stop diff --git a/vendor/golang.org/x/tools/go/packages/visit.go b/vendor/golang.org/x/tools/go/packages/visit.go index af6a60d75f8..c546b1b63e3 100644 --- a/vendor/golang.org/x/tools/go/packages/visit.go +++ b/vendor/golang.org/x/tools/go/packages/visit.go @@ -78,7 +78,7 @@ func PrintErrors(pkgs []*Package) int { return n } -// Postorder returns an iterator over the the packages in +// Postorder returns an iterator over the packages in // the import graph whose roots are pkg. // Packages are enumerated in dependencies-first order. func Postorder(pkgs []*Package) iter.Seq[*Package] { diff --git a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go index 6c0c74968f3..6646bf55089 100644 --- a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go +++ b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go @@ -249,7 +249,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { case *types.Func: // A func, if not package-level, must be a method. 
- if recv := obj.Type().(*types.Signature).Recv(); recv == nil { + if recv := obj.Signature().Recv(); recv == nil { return "", fmt.Errorf("func is not a method: %v", obj) } @@ -405,7 +405,7 @@ func (enc *Encoder) concreteMethod(meth *types.Func) (Path, bool) { return "", false } - _, named := typesinternal.ReceiverNamed(meth.Type().(*types.Signature).Recv()) + _, named := typesinternal.ReceiverNamed(meth.Signature().Recv()) if named == nil { return "", false } diff --git a/vendor/golang.org/x/tools/go/types/typeutil/map.go b/vendor/golang.org/x/tools/go/types/typeutil/map.go index f035a0b6be9..36624572a66 100644 --- a/vendor/golang.org/x/tools/go/types/typeutil/map.go +++ b/vendor/golang.org/x/tools/go/types/typeutil/map.go @@ -304,8 +304,7 @@ func (h hasher) hash(t types.Type) uint32 { case *types.Named: hash := h.hashTypeName(t.Obj()) targs := t.TypeArgs() - for i := 0; i < targs.Len(); i++ { - targ := targs.At(i) + for targ := range targs.Types() { hash += 2 * h.hash(targ) } return hash diff --git a/vendor/golang.org/x/tools/imports/forward.go b/vendor/golang.org/x/tools/imports/forward.go index 22ae7777267..5d120d077c0 100644 --- a/vendor/golang.org/x/tools/imports/forward.go +++ b/vendor/golang.org/x/tools/imports/forward.go @@ -69,3 +69,9 @@ func Process(filename string, src []byte, opt *Options) ([]byte, error) { } return intimp.Process(filename, src, intopt) } + +// VendorlessPath returns the devendorized version of the import path ipath. +// For example, VendorlessPath("foo/barbendor/a/b") return "a/b". 
+func VendorlessPath(ipath string) string { + return intimp.VendorlessPath(ipath) +} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/bimport.go b/vendor/golang.org/x/tools/internal/gcimporter/bimport.go index 734c46198df..555ef626c00 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/bimport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/bimport.go @@ -34,7 +34,7 @@ type fileInfo struct { const maxlines = 64 * 1024 func (s *fakeFileSet) pos(file string, line, column int) token.Pos { - // TODO(mdempsky): Make use of column. + _ = column // TODO(mdempsky): Make use of column. // Since we don't know the set of needed file positions, we reserve maxlines // positions per file. We delay calling token.File.SetLines until all diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go index 4a4357d2bd4..2bef2b058ba 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go @@ -829,8 +829,7 @@ func (p *iexporter) doDecl(obj types.Object) { // their name must be qualified before exporting recv. if rparams := sig.RecvTypeParams(); rparams.Len() > 0 { prefix := obj.Name() + "." + m.Name() - for i := 0; i < rparams.Len(); i++ { - rparam := rparams.At(i) + for rparam := range rparams.TypeParams() { name := tparamExportName(prefix, rparam) w.p.tparamNames[rparam.Obj()] = name } @@ -944,6 +943,13 @@ func (w *exportWriter) posV0(pos token.Pos) { } func (w *exportWriter) pkg(pkg *types.Package) { + if pkg == nil { + // [exportWriter.typ] accepts a nil pkg only for types + // of constants, which cannot contain named objects + // such as fields or methods and thus should never + // reach this method (#76222). + panic("nil package") + } // Ensure any referenced packages are declared in the main index. 
w.p.allPkgs[pkg] = true @@ -959,9 +965,11 @@ func (w *exportWriter) qualifiedType(obj *types.TypeName) { w.pkg(obj.Pkg()) } -// TODO(rfindley): what does 'pkg' even mean here? It would be better to pass -// it in explicitly into signatures and structs that may use it for -// constructing fields. +// typ emits the specified type. +// +// Objects within the type (struct fields and interface methods) are +// qualified by pkg. It may be nil if the type cannot contain objects, +// such as the type of a constant. func (w *exportWriter) typ(t types.Type, pkg *types.Package) { w.data.uint64(w.p.typOff(t, pkg)) } @@ -991,6 +999,7 @@ func (w *exportWriter) startType(k itag) { w.data.uint64(uint64(k)) } +// doTyp is the implementation of [exportWriter.typ]. func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { if trace { w.p.trace("exporting type %s (%T)", t, t) @@ -1064,7 +1073,7 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { case *types.Signature: w.startType(signatureType) - w.pkg(pkg) + w.pkg(pkg) // qualifies param/result vars w.signature(t) case *types.Struct: @@ -1110,19 +1119,19 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { case *types.Interface: w.startType(interfaceType) - w.pkg(pkg) + w.pkg(pkg) // qualifies unexported method funcs n := t.NumEmbeddeds() w.uint64(uint64(n)) for i := 0; i < n; i++ { ft := t.EmbeddedType(i) - tPkg := pkg if named, _ := types.Unalias(ft).(*types.Named); named != nil { w.pos(named.Obj().Pos()) } else { + // e.g. ~int w.pos(token.NoPos) } - w.typ(ft, tPkg) + w.typ(ft, pkg) } // See comment for struct fields. 
In shallow mode we change the encoding @@ -1223,20 +1232,19 @@ func (w *exportWriter) signature(sig *types.Signature) { func (w *exportWriter) typeList(ts *types.TypeList, pkg *types.Package) { w.uint64(uint64(ts.Len())) - for i := 0; i < ts.Len(); i++ { - w.typ(ts.At(i), pkg) + for t := range ts.Types() { + w.typ(t, pkg) } } func (w *exportWriter) tparamList(prefix string, list *types.TypeParamList, pkg *types.Package) { ll := uint64(list.Len()) w.uint64(ll) - for i := 0; i < list.Len(); i++ { - tparam := list.At(i) + for tparam := range list.TypeParams() { // Set the type parameter exportName before exporting its type. exportName := tparamExportName(prefix, tparam) w.p.tparamNames[tparam.Obj()] = exportName - w.typ(list.At(i), pkg) + w.typ(tparam, pkg) } } diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go index 82e6c9d2dc1..4d6d50094a0 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go @@ -432,10 +432,10 @@ func (p *iimporter) doDecl(pkg *types.Package, name string) { errorf("%v.%v not in index", pkg, name) } - r := &importReader{p: p, currPkg: pkg} + r := &importReader{p: p} r.declReader.Reset(p.declData[off:]) - r.obj(name) + r.obj(pkg, name) } func (p *iimporter) stringAt(off uint64) string { @@ -551,7 +551,6 @@ func canReuse(def *types.Named, rhs types.Type) bool { type importReader struct { p *iimporter declReader bytes.Reader - currPkg *types.Package prevFile string prevLine int64 prevColumn int64 @@ -565,7 +564,8 @@ type importReader struct { // for 1.24, but the fix was not worth back-porting). var markBlack = func(name *types.TypeName) {} -func (r *importReader) obj(name string) { +// obj decodes and declares the package-level object denoted by (pkg, name). 
+func (r *importReader) obj(pkg *types.Package, name string) { tag := r.byte() pos := r.pos() @@ -576,27 +576,27 @@ func (r *importReader) obj(name string) { tparams = r.tparamList() } typ := r.typ() - obj := aliases.NewAlias(r.p.aliases, pos, r.currPkg, name, typ, tparams) + obj := aliases.NewAlias(r.p.aliases, pos, pkg, name, typ, tparams) markBlack(obj) // workaround for golang/go#69912 r.declare(obj) case constTag: typ, val := r.value() - r.declare(types.NewConst(pos, r.currPkg, name, typ, val)) + r.declare(types.NewConst(pos, pkg, name, typ, val)) case funcTag, genericFuncTag: var tparams []*types.TypeParam if tag == genericFuncTag { tparams = r.tparamList() } - sig := r.signature(nil, nil, tparams) - r.declare(types.NewFunc(pos, r.currPkg, name, sig)) + sig := r.signature(pkg, nil, nil, tparams) + r.declare(types.NewFunc(pos, pkg, name, sig)) case typeTag, genericTypeTag: // Types can be recursive. We need to setup a stub // declaration before recursing. - obj := types.NewTypeName(pos, r.currPkg, name, nil) + obj := types.NewTypeName(pos, pkg, name, nil) named := types.NewNamed(obj, nil, nil) markBlack(obj) // workaround for golang/go#69912 @@ -616,7 +616,7 @@ func (r *importReader) obj(name string) { for n := r.uint64(); n > 0; n-- { mpos := r.pos() mname := r.ident() - recv := r.param() + recv := r.param(pkg) // If the receiver has any targs, set those as the // rparams of the method (since those are the @@ -630,9 +630,9 @@ func (r *importReader) obj(name string) { rparams[i] = types.Unalias(targs.At(i)).(*types.TypeParam) } } - msig := r.signature(recv, rparams, nil) + msig := r.signature(pkg, recv, rparams, nil) - named.AddMethod(types.NewFunc(mpos, r.currPkg, mname, msig)) + named.AddMethod(types.NewFunc(mpos, pkg, mname, msig)) } } @@ -644,12 +644,12 @@ func (r *importReader) obj(name string) { errorf("unexpected type param type") } name0 := tparamName(name) - tn := types.NewTypeName(pos, r.currPkg, name0, nil) + tn := types.NewTypeName(pos, pkg, name0, 
nil) t := types.NewTypeParam(tn, nil) // To handle recursive references to the typeparam within its // bound, save the partial type in tparamIndex before reading the bounds. - id := ident{r.currPkg, name} + id := ident{pkg, name} r.p.tparamIndex[id] = t var implicit bool if r.p.version >= iexportVersionGo1_18 { @@ -672,7 +672,7 @@ func (r *importReader) obj(name string) { case varTag: typ := r.typ() - v := types.NewVar(pos, r.currPkg, name, typ) + v := types.NewVar(pos, pkg, name, typ) typesinternal.SetVarKind(v, typesinternal.PackageVar) r.declare(v) @@ -905,11 +905,11 @@ func (r *importReader) doType(base *types.Named) (res types.Type) { case mapType: return types.NewMap(r.typ(), r.typ()) case signatureType: - r.currPkg = r.pkg() - return r.signature(nil, nil, nil) + paramPkg := r.pkg() + return r.signature(paramPkg, nil, nil, nil) case structType: - r.currPkg = r.pkg() + fieldPkg := r.pkg() fields := make([]*types.Var, r.uint64()) tags := make([]string, len(fields)) @@ -932,7 +932,7 @@ func (r *importReader) doType(base *types.Named) (res types.Type) { // discussed in iexport.go, this is not correct, but mostly works and is // preferable to failing (for now at least). if field == nil { - field = types.NewField(fpos, r.currPkg, fname, ftyp, emb) + field = types.NewField(fpos, fieldPkg, fname, ftyp, emb) } fields[i] = field @@ -941,7 +941,7 @@ func (r *importReader) doType(base *types.Named) (res types.Type) { return types.NewStruct(fields, tags) case interfaceType: - r.currPkg = r.pkg() + methodPkg := r.pkg() // qualifies methods and their param/result vars embeddeds := make([]types.Type, r.uint64()) for i := range embeddeds { @@ -963,12 +963,12 @@ func (r *importReader) doType(base *types.Named) (res types.Type) { // don't agree with this. 
var recv *types.Var if base != nil { - recv = types.NewVar(token.NoPos, r.currPkg, "", base) + recv = types.NewVar(token.NoPos, methodPkg, "", base) } - msig := r.signature(recv, nil, nil) + msig := r.signature(methodPkg, recv, nil, nil) if method == nil { - method = types.NewFunc(mpos, r.currPkg, mname, msig) + method = types.NewFunc(mpos, methodPkg, mname, msig) } methods[i] = method } @@ -1049,9 +1049,9 @@ func (r *importReader) objectPathObject() types.Object { return obj } -func (r *importReader) signature(recv *types.Var, rparams []*types.TypeParam, tparams []*types.TypeParam) *types.Signature { - params := r.paramList() - results := r.paramList() +func (r *importReader) signature(paramPkg *types.Package, recv *types.Var, rparams []*types.TypeParam, tparams []*types.TypeParam) *types.Signature { + params := r.paramList(paramPkg) + results := r.paramList(paramPkg) variadic := params.Len() > 0 && r.bool() return types.NewSignatureType(recv, rparams, tparams, params, results, variadic) } @@ -1070,19 +1070,19 @@ func (r *importReader) tparamList() []*types.TypeParam { return xs } -func (r *importReader) paramList() *types.Tuple { +func (r *importReader) paramList(pkg *types.Package) *types.Tuple { xs := make([]*types.Var, r.uint64()) for i := range xs { - xs[i] = r.param() + xs[i] = r.param(pkg) } return types.NewTuple(xs...) 
} -func (r *importReader) param() *types.Var { +func (r *importReader) param(pkg *types.Package) *types.Var { pos := r.pos() name := r.ident() typ := r.typ() - return types.NewParam(pos, r.currPkg, name, typ) + return types.NewParam(pos, pkg, name, typ) } func (r *importReader) bool() bool { diff --git a/vendor/golang.org/x/tools/internal/stdlib/deps.go b/vendor/golang.org/x/tools/internal/stdlib/deps.go index 96ad6c58210..581784da435 100644 --- a/vendor/golang.org/x/tools/internal/stdlib/deps.go +++ b/vendor/golang.org/x/tools/internal/stdlib/deps.go @@ -12,354 +12,508 @@ type pkginfo struct { } var deps = [...]pkginfo{ - {"archive/tar", "\x03k\x03E;\x01\n\x01$\x01\x01\x02\x05\b\x02\x01\x02\x02\f"}, - {"archive/zip", "\x02\x04a\a\x03\x12\x021;\x01+\x05\x01\x0f\x03\x02\x0e\x04"}, - {"bufio", "\x03k\x83\x01D\x14"}, - {"bytes", "n*Y\x03\fG\x02\x02"}, + {"archive/tar", "\x03n\x03E<\x01\n\x01$\x01\x01\x02\x05\b\x02\x01\x02\x02\f"}, + {"archive/zip", "\x02\x04d\a\x03\x12\x021<\x01+\x05\x01\x0f\x03\x02\x0e\x04"}, + {"bufio", "\x03n\x84\x01D\x14"}, + {"bytes", "q*Z\x03\fG\x02\x02"}, {"cmp", ""}, - {"compress/bzip2", "\x02\x02\xed\x01A"}, - {"compress/flate", "\x02l\x03\x80\x01\f\x033\x01\x03"}, - {"compress/gzip", "\x02\x04a\a\x03\x14lT"}, - {"compress/lzw", "\x02l\x03\x80\x01"}, - {"compress/zlib", "\x02\x04a\a\x03\x12\x01m"}, - {"container/heap", "\xb3\x02"}, + {"compress/bzip2", "\x02\x02\xf1\x01A"}, + {"compress/flate", "\x02o\x03\x81\x01\f\x033\x01\x03"}, + {"compress/gzip", "\x02\x04d\a\x03\x14mT"}, + {"compress/lzw", "\x02o\x03\x81\x01"}, + {"compress/zlib", "\x02\x04d\a\x03\x12\x01n"}, + {"container/heap", "\xb7\x02"}, {"container/list", ""}, {"container/ring", ""}, - {"context", "n\\m\x01\r"}, - {"crypto", "\x83\x01nC"}, - {"crypto/aes", "\x10\n\a\x93\x02"}, - {"crypto/cipher", "\x03\x1e\x01\x01\x1e\x11\x1c+X"}, - {"crypto/des", "\x10\x13\x1e-+\x9b\x01\x03"}, - {"crypto/dsa", "A\x04)\x83\x01\r"}, - {"crypto/ecdh", "\x03\v\f\x0e\x04\x15\x04\r\x1c\x83\x01"}, - 
{"crypto/ecdsa", "\x0e\x05\x03\x04\x01\x0e\a\v\x05\x01\x04\f\x01\x1c\x83\x01\r\x05K\x01"}, - {"crypto/ed25519", "\x0e\x1c\x11\x06\n\a\x1c\x83\x01C"}, - {"crypto/elliptic", "0>\x83\x01\r9"}, - {"crypto/fips140", " \x05"}, - {"crypto/hkdf", "-\x13\x01-\x15"}, - {"crypto/hmac", "\x1a\x14\x12\x01\x111"}, - {"crypto/internal/boring", "\x0e\x02\rf"}, - {"crypto/internal/boring/bbig", "\x1a\xe4\x01M"}, - {"crypto/internal/boring/bcache", "\xb8\x02\x13"}, + {"context", "q[o\x01\r"}, + {"crypto", "\x86\x01oC"}, + {"crypto/aes", "\x10\n\t\x95\x02"}, + {"crypto/cipher", "\x03 \x01\x01\x1f\x11\x1c+Y"}, + {"crypto/des", "\x10\x15\x1f-+\x9c\x01\x03"}, + {"crypto/dsa", "D\x04)\x84\x01\r"}, + {"crypto/ecdh", "\x03\v\f\x10\x04\x16\x04\r\x1c\x84\x01"}, + {"crypto/ecdsa", "\x0e\x05\x03\x04\x01\x10\a\v\x06\x01\x04\f\x01\x1c\x84\x01\r\x05K\x01"}, + {"crypto/ed25519", "\x0e\x1e\x11\a\n\a\x1c\x84\x01C"}, + {"crypto/elliptic", "2?\x84\x01\r9"}, + {"crypto/fips140", "\"\x05"}, + {"crypto/hkdf", "/\x14\x01-\x15"}, + {"crypto/hmac", "\x1a\x16\x13\x01\x111"}, + {"crypto/internal/boring", "\x0e\x02\ri"}, + {"crypto/internal/boring/bbig", "\x1a\xe8\x01M"}, + {"crypto/internal/boring/bcache", "\xbc\x02\x13"}, {"crypto/internal/boring/sig", ""}, - {"crypto/internal/cryptotest", "\x03\r\n\x06$\x0e\x19\x06\x12\x12 \x04\a\t\x16\x01\x11\x11\x1b\x01\a\x05\b\x03\x05\v"}, - {"crypto/internal/entropy", "F"}, - {"crypto/internal/fips140", "?/\x15\xa7\x01\v\x16"}, - {"crypto/internal/fips140/aes", "\x03\x1d\x03\x02\x13\x05\x01\x01\x05*\x92\x014"}, - {"crypto/internal/fips140/aes/gcm", " \x01\x02\x02\x02\x11\x05\x01\x06*\x8f\x01"}, - {"crypto/internal/fips140/alias", "\xcb\x02"}, - {"crypto/internal/fips140/bigmod", "%\x18\x01\x06*\x92\x01"}, - {"crypto/internal/fips140/check", " \x0e\x06\t\x02\xb2\x01Z"}, - {"crypto/internal/fips140/check/checktest", "%\x85\x02!"}, - {"crypto/internal/fips140/drbg", "\x03\x1c\x01\x01\x04\x13\x05\b\x01(\x83\x01\x0f7"}, - {"crypto/internal/fips140/ecdh", 
"\x03\x1d\x05\x02\t\r1\x83\x01\x0f7"}, - {"crypto/internal/fips140/ecdsa", "\x03\x1d\x04\x01\x02\a\x02\x068\x15nF"}, - {"crypto/internal/fips140/ed25519", "\x03\x1d\x05\x02\x04\v8\xc6\x01\x03"}, - {"crypto/internal/fips140/edwards25519", "%\a\f\x051\x92\x017"}, - {"crypto/internal/fips140/edwards25519/field", "%\x13\x051\x92\x01"}, - {"crypto/internal/fips140/hkdf", "\x03\x1d\x05\t\x06:\x15"}, - {"crypto/internal/fips140/hmac", "\x03\x1d\x14\x01\x018\x15"}, - {"crypto/internal/fips140/mlkem", "\x03\x1d\x05\x02\x0e\x03\x051"}, - {"crypto/internal/fips140/nistec", "%\f\a\x051\x92\x01*\r\x14"}, - {"crypto/internal/fips140/nistec/fiat", "%\x136\x92\x01"}, - {"crypto/internal/fips140/pbkdf2", "\x03\x1d\x05\t\x06:\x15"}, - {"crypto/internal/fips140/rsa", "\x03\x1d\x04\x01\x02\r\x01\x01\x026\x15nF"}, - {"crypto/internal/fips140/sha256", "\x03\x1d\x1d\x01\x06*\x15}"}, - {"crypto/internal/fips140/sha3", "\x03\x1d\x18\x05\x010\x92\x01K"}, - {"crypto/internal/fips140/sha512", "\x03\x1d\x1d\x01\x06*\x15}"}, - {"crypto/internal/fips140/ssh", "%^"}, - {"crypto/internal/fips140/subtle", "#\x1a\xc3\x01"}, - {"crypto/internal/fips140/tls12", "\x03\x1d\x05\t\x06\x028\x15"}, - {"crypto/internal/fips140/tls13", "\x03\x1d\x05\b\a\t1\x15"}, - {"crypto/internal/fips140cache", "\xaa\x02\r&"}, + {"crypto/internal/constanttime", ""}, + {"crypto/internal/cryptotest", "\x03\r\n\b%\x0e\x19\x06\x12\x12 \x04\x06\t\x18\x01\x11\x11\x1b\x01\a\x05\b\x03\x05\v"}, + {"crypto/internal/entropy", "I"}, + {"crypto/internal/entropy/v1.0.0", "B/\x93\x018\x13"}, + {"crypto/internal/fips140", "A0\xbd\x01\v\x16"}, + {"crypto/internal/fips140/aes", "\x03\x1f\x03\x02\x13\x05\x01\x01\x06*\x93\x014"}, + {"crypto/internal/fips140/aes/gcm", "\"\x01\x02\x02\x02\x11\x05\x01\a*\x90\x01"}, + {"crypto/internal/fips140/alias", "\xcf\x02"}, + {"crypto/internal/fips140/bigmod", "'\x18\x01\a*\x93\x01"}, + {"crypto/internal/fips140/check", "\"\x0e\x06\t\x02\xb4\x01Z"}, + {"crypto/internal/fips140/check/checktest", 
"'\x87\x02!"}, + {"crypto/internal/fips140/drbg", "\x03\x1e\x01\x01\x04\x13\x05\t\x01(\x84\x01\x0f7\x01"}, + {"crypto/internal/fips140/ecdh", "\x03\x1f\x05\x02\t\r2\x84\x01\x0f7"}, + {"crypto/internal/fips140/ecdsa", "\x03\x1f\x04\x01\x02\a\x02\x069\x15oF"}, + {"crypto/internal/fips140/ed25519", "\x03\x1f\x05\x02\x04\v9\xc7\x01\x03"}, + {"crypto/internal/fips140/edwards25519", "\x1e\t\a\x112\x93\x017"}, + {"crypto/internal/fips140/edwards25519/field", "'\x13\x052\x93\x01"}, + {"crypto/internal/fips140/hkdf", "\x03\x1f\x05\t\x06;\x15"}, + {"crypto/internal/fips140/hmac", "\x03\x1f\x14\x01\x019\x15"}, + {"crypto/internal/fips140/mlkem", "\x03\x1f\x05\x02\x0e\x03\x052\xca\x01"}, + {"crypto/internal/fips140/nistec", "\x1e\t\f\f2\x93\x01*\r\x14"}, + {"crypto/internal/fips140/nistec/fiat", "'\x137\x93\x01"}, + {"crypto/internal/fips140/pbkdf2", "\x03\x1f\x05\t\x06;\x15"}, + {"crypto/internal/fips140/rsa", "\x03\x1b\x04\x04\x01\x02\r\x01\x01\x027\x15oF"}, + {"crypto/internal/fips140/sha256", "\x03\x1f\x1d\x01\a*\x15~"}, + {"crypto/internal/fips140/sha3", "\x03\x1f\x18\x05\x011\x93\x01K"}, + {"crypto/internal/fips140/sha512", "\x03\x1f\x1d\x01\a*\x15~"}, + {"crypto/internal/fips140/ssh", "'_"}, + {"crypto/internal/fips140/subtle", "\x1e\a\x1a\xc5\x01"}, + {"crypto/internal/fips140/tls12", "\x03\x1f\x05\t\x06\x029\x15"}, + {"crypto/internal/fips140/tls13", "\x03\x1f\x05\b\a\t2\x15"}, + {"crypto/internal/fips140cache", "\xae\x02\r&"}, {"crypto/internal/fips140deps", ""}, - {"crypto/internal/fips140deps/byteorder", "\x99\x01"}, - {"crypto/internal/fips140deps/cpu", "\xae\x01\a"}, - {"crypto/internal/fips140deps/godebug", "\xb6\x01"}, - {"crypto/internal/fips140hash", "5\x1b3\xc8\x01"}, - {"crypto/internal/fips140only", "'\r\x01\x01M3;"}, + {"crypto/internal/fips140deps/byteorder", "\x9c\x01"}, + {"crypto/internal/fips140deps/cpu", "\xb1\x01\a"}, + {"crypto/internal/fips140deps/godebug", "\xb9\x01"}, + {"crypto/internal/fips140deps/time", "\xc9\x02"}, + 
{"crypto/internal/fips140hash", "7\x1c3\xc9\x01"}, + {"crypto/internal/fips140only", ")\r\x01\x01N3<"}, {"crypto/internal/fips140test", ""}, - {"crypto/internal/hpke", "\x0e\x01\x01\x03\x053#+gM"}, - {"crypto/internal/impl", "\xb5\x02"}, - {"crypto/internal/randutil", "\xf1\x01\x12"}, - {"crypto/internal/sysrand", "nn! \r\r\x01\x01\f\x06"}, - {"crypto/internal/sysrand/internal/seccomp", "n"}, - {"crypto/md5", "\x0e3-\x15\x16g"}, - {"crypto/mlkem", "/"}, - {"crypto/pbkdf2", "2\x0e\x01-\x15"}, - {"crypto/rand", "\x1a\x06\a\x1a\x04\x01(\x83\x01\rM"}, - {"crypto/rc4", "#\x1e-\xc6\x01"}, - {"crypto/rsa", "\x0e\f\x01\t\x0f\r\x01\x04\x06\a\x1c\x03\x123;\f\x01"}, - {"crypto/sha1", "\x0e\f'\x03*\x15\x16\x15R"}, - {"crypto/sha256", "\x0e\f\x1aO"}, - {"crypto/sha3", "\x0e'N\xc8\x01"}, - {"crypto/sha512", "\x0e\f\x1cM"}, - {"crypto/subtle", "8\x9b\x01W"}, - {"crypto/tls", "\x03\b\x02\x01\x01\x01\x01\x02\x01\x01\x01\x02\x01\x01\a\x01\r\n\x01\t\x05\x03\x01\x01\x01\x01\x02\x01\x02\x01\x17\x02\x03\x12\x16\x15\b;\x16\x16\r\b\x01\x01\x01\x02\x01\r\x06\x02\x01\x0f"}, - {"crypto/tls/internal/fips140tls", "\x17\xa1\x02"}, - {"crypto/x509", "\x03\v\x01\x01\x01\x01\x01\x01\x01\x012\x05\x01\x01\x02\x05\x0e\x06\x02\x02\x03E\x038\x01\x02\b\x01\x01\x02\a\x10\x05\x01\x06\x02\x05\n\x01\x02\x0e\x02\x01\x01\x02\x03\x01"}, - {"crypto/x509/pkix", "d\x06\a\x8d\x01G"}, - {"database/sql", "\x03\nK\x16\x03\x80\x01\v\a\"\x05\b\x02\x03\x01\r\x02\x02\x02"}, - {"database/sql/driver", "\ra\x03\xb4\x01\x0f\x11"}, - {"debug/buildinfo", "\x03X\x02\x01\x01\b\a\x03e\x19\x02\x01+\x0f\x1f"}, - {"debug/dwarf", "\x03d\a\x03\x80\x011\x11\x01\x01"}, - {"debug/elf", "\x03\x06Q\r\a\x03e\x1a\x01,\x17\x01\x16"}, - {"debug/gosym", "\x03d\n\xc2\x01\x01\x01\x02"}, - {"debug/macho", "\x03\x06Q\r\ne\x1b,\x17\x01"}, - {"debug/pe", "\x03\x06Q\r\a\x03e\x1b,\x17\x01\x16"}, - {"debug/plan9obj", "g\a\x03e\x1b,"}, - {"embed", "n*@\x19\x01S"}, + {"crypto/internal/hpke", "\x0e\x01\x01\x03\x056#+hM"}, + {"crypto/internal/impl", 
"\xb9\x02"}, + {"crypto/internal/randutil", "\xf5\x01\x12"}, + {"crypto/internal/sysrand", "qo! \r\r\x01\x01\f\x06"}, + {"crypto/internal/sysrand/internal/seccomp", "q"}, + {"crypto/md5", "\x0e6-\x15\x16h"}, + {"crypto/mlkem", "1"}, + {"crypto/pbkdf2", "4\x0f\x01-\x15"}, + {"crypto/rand", "\x1a\b\a\x1b\x04\x01(\x84\x01\rM"}, + {"crypto/rc4", "%\x1f-\xc7\x01"}, + {"crypto/rsa", "\x0e\f\x01\v\x0f\x0e\x01\x04\x06\a\x1c\x03\x123<\f\x01"}, + {"crypto/sha1", "\x0e\f*\x03*\x15\x16\x15S"}, + {"crypto/sha256", "\x0e\f\x1cP"}, + {"crypto/sha3", "\x0e)O\xc9\x01"}, + {"crypto/sha512", "\x0e\f\x1eN"}, + {"crypto/subtle", "\x1e\x1c\x9c\x01X"}, + {"crypto/tls", "\x03\b\x02\x01\x01\x01\x01\x02\x01\x01\x01\x02\x01\x01\t\x01\r\n\x01\n\x05\x03\x01\x01\x01\x01\x02\x01\x02\x01\x17\x02\x03\x12\x16\x15\b<\x16\x16\r\b\x01\x01\x01\x02\x01\r\x06\x02\x01\x0f"}, + {"crypto/tls/internal/fips140tls", "\x17\xa5\x02"}, + {"crypto/x509", "\x03\v\x01\x01\x01\x01\x01\x01\x01\x015\x05\x01\x01\x02\x05\x0e\x06\x02\x02\x03E\x039\x01\x02\b\x01\x01\x02\a\x10\x05\x01\x06\x02\x05\b\x02\x01\x02\x0e\x02\x01\x01\x02\x03\x01"}, + {"crypto/x509/pkix", "g\x06\a\x8e\x01G"}, + {"database/sql", "\x03\nN\x16\x03\x81\x01\v\a\"\x05\b\x02\x03\x01\r\x02\x02\x02"}, + {"database/sql/driver", "\rd\x03\xb5\x01\x0f\x11"}, + {"debug/buildinfo", "\x03[\x02\x01\x01\b\a\x03e\x1a\x02\x01+\x0f\x1f"}, + {"debug/dwarf", "\x03g\a\x03\x81\x011\x11\x01\x01"}, + {"debug/elf", "\x03\x06T\r\a\x03e\x1b\x01\f \x17\x01\x16"}, + {"debug/gosym", "\x03g\n\xc3\x01\x01\x01\x02"}, + {"debug/macho", "\x03\x06T\r\ne\x1c,\x17\x01"}, + {"debug/pe", "\x03\x06T\r\a\x03e\x1c,\x17\x01\x16"}, + {"debug/plan9obj", "j\a\x03e\x1c,"}, + {"embed", "q*A\x19\x01S"}, {"embed/internal/embedtest", ""}, {"encoding", ""}, - {"encoding/ascii85", "\xf1\x01C"}, - {"encoding/asn1", "\x03k\x03\x8c\x01\x01'\r\x02\x01\x10\x03\x01"}, - {"encoding/base32", "\xf1\x01A\x02"}, - {"encoding/base64", "\x99\x01XA\x02"}, - {"encoding/binary", "n\x83\x01\f(\r\x05"}, - {"encoding/csv", 
"\x02\x01k\x03\x80\x01D\x12\x02"}, - {"encoding/gob", "\x02`\x05\a\x03e\x1b\v\x01\x03\x1d\b\x12\x01\x0f\x02"}, - {"encoding/hex", "n\x03\x80\x01A\x03"}, - {"encoding/json", "\x03\x01^\x04\b\x03\x80\x01\f(\r\x02\x01\x02\x10\x01\x01\x02"}, - {"encoding/pem", "\x03c\b\x83\x01A\x03"}, - {"encoding/xml", "\x02\x01_\f\x03\x80\x014\x05\n\x01\x02\x10\x02"}, - {"errors", "\xca\x01\x81\x01"}, - {"expvar", "kK?\b\v\x15\r\b\x02\x03\x01\x11"}, - {"flag", "b\f\x03\x80\x01,\b\x05\b\x02\x01\x10"}, - {"fmt", "nE>\f \b\r\x02\x03\x12"}, - {"go/ast", "\x03\x01m\x0e\x01q\x03)\b\r\x02\x01"}, - {"go/build", "\x02\x01k\x03\x01\x02\x02\a\x02\x01\x17\x1f\x04\x02\t\x19\x13\x01+\x01\x04\x01\a\b\x02\x01\x12\x02\x02"}, - {"go/build/constraint", "n\xc6\x01\x01\x12\x02"}, - {"go/constant", "q\x0f}\x01\x024\x01\x02\x12"}, - {"go/doc", "\x04m\x01\x05\t>31\x10\x02\x01\x12\x02"}, - {"go/doc/comment", "\x03n\xc1\x01\x01\x01\x01\x12\x02"}, - {"go/format", "\x03n\x01\v\x01\x02qD"}, - {"go/importer", "s\a\x01\x01\x04\x01p9"}, - {"go/internal/gccgoimporter", "\x02\x01X\x13\x03\x04\v\x01n\x02,\x01\x05\x11\x01\f\b"}, - {"go/internal/gcimporter", "\x02o\x0f\x010\x05\x0e-,\x15\x03\x02"}, - {"go/internal/srcimporter", "q\x01\x01\n\x03\x01p,\x01\x05\x12\x02\x14"}, - {"go/parser", "\x03k\x03\x01\x02\v\x01q\x01+\x06\x12"}, - {"go/printer", "q\x01\x02\x03\tq\f \x15\x02\x01\x02\v\x05\x02"}, - {"go/scanner", "\x03n\x0fq2\x10\x01\x13\x02"}, - {"go/token", "\x04m\x83\x01>\x02\x03\x01\x0f\x02"}, - {"go/types", "\x03\x01\x06d\x03\x01\x03\b\x03\x02\x15\x1f\x061\x04\x03\t \x06\a\b\x01\x01\x01\x02\x01\x0f\x02\x02"}, - {"go/version", "\xbb\x01z"}, - {"hash", "\xf1\x01"}, - {"hash/adler32", "n\x15\x16"}, - {"hash/crc32", "n\x15\x16\x15\x89\x01\x01\x13"}, - {"hash/crc64", "n\x15\x16\x9e\x01"}, - {"hash/fnv", "n\x15\x16g"}, - {"hash/maphash", "\x83\x01\x11!\x03\x93\x01"}, - {"html", "\xb5\x02\x02\x12"}, - {"html/template", "\x03h\x06\x18-;\x01\n!\x05\x01\x02\x03\f\x01\x02\f\x01\x03\x02"}, - {"image", 
"\x02l\x1ee\x0f4\x03\x01"}, + {"encoding/ascii85", "\xf5\x01C"}, + {"encoding/asn1", "\x03n\x03e(\x01'\r\x02\x01\x10\x03\x01"}, + {"encoding/base32", "\xf5\x01A\x02"}, + {"encoding/base64", "\x9c\x01YA\x02"}, + {"encoding/binary", "q\x84\x01\f(\r\x05"}, + {"encoding/csv", "\x02\x01n\x03\x81\x01D\x12\x02"}, + {"encoding/gob", "\x02c\x05\a\x03e\x1c\v\x01\x03\x1d\b\x12\x01\x0f\x02"}, + {"encoding/hex", "q\x03\x81\x01A\x03"}, + {"encoding/json", "\x03\x01a\x04\b\x03\x81\x01\f(\r\x02\x01\x02\x10\x01\x01\x02"}, + {"encoding/pem", "\x03f\b\x84\x01A\x03"}, + {"encoding/xml", "\x02\x01b\f\x03\x81\x014\x05\n\x01\x02\x10\x02"}, + {"errors", "\xcc\x01\x83\x01"}, + {"expvar", "nK@\b\v\x15\r\b\x02\x03\x01\x11"}, + {"flag", "e\f\x03\x81\x01,\b\x05\b\x02\x01\x10"}, + {"fmt", "qE&\x19\f \b\r\x02\x03\x12"}, + {"go/ast", "\x03\x01p\x0e\x01r\x03)\b\r\x02\x01\x12\x02"}, + {"go/build", "\x02\x01n\x03\x01\x02\x02\a\x02\x01\x17\x1f\x04\x02\b\x1b\x13\x01+\x01\x04\x01\a\b\x02\x01\x12\x02\x02"}, + {"go/build/constraint", "q\xc7\x01\x01\x12\x02"}, + {"go/constant", "t\x0f~\x01\x024\x01\x02\x12"}, + {"go/doc", "\x04p\x01\x05\t=51\x10\x02\x01\x12\x02"}, + {"go/doc/comment", "\x03q\xc2\x01\x01\x01\x01\x12\x02"}, + {"go/format", "\x03q\x01\v\x01\x02rD"}, + {"go/importer", "v\a\x01\x01\x04\x01q9"}, + {"go/internal/gccgoimporter", "\x02\x01[\x13\x03\x04\v\x01o\x02,\x01\x05\x11\x01\f\b"}, + {"go/internal/gcimporter", "\x02r\x0f\x010\x05\r/,\x15\x03\x02"}, + {"go/internal/srcimporter", "t\x01\x01\n\x03\x01q,\x01\x05\x12\x02\x14"}, + {"go/parser", "\x03n\x03\x01\x02\v\x01r\x01+\x06\x12"}, + {"go/printer", "t\x01\x02\x03\tr\f \x15\x02\x01\x02\v\x05\x02"}, + {"go/scanner", "\x03q\x0fr2\x10\x01\x13\x02"}, + {"go/token", "\x04p\x84\x01>\x02\x03\x01\x0f\x02"}, + {"go/types", "\x03\x01\x06g\x03\x01\x03\b\x03\x024\x062\x04\x03\t \x06\a\b\x01\x01\x01\x02\x01\x0f\x02\x02"}, + {"go/version", "\xbe\x01{"}, + {"hash", "\xf5\x01"}, + {"hash/adler32", "q\x15\x16"}, + {"hash/crc32", "q\x15\x16\x15\x8a\x01\x01\x13"}, 
+ {"hash/crc64", "q\x15\x16\x9f\x01"}, + {"hash/fnv", "q\x15\x16h"}, + {"hash/maphash", "\x86\x01\x11<|"}, + {"html", "\xb9\x02\x02\x12"}, + {"html/template", "\x03k\x06\x18-<\x01\n!\x05\x01\x02\x03\f\x01\x02\f\x01\x03\x02"}, + {"image", "\x02o\x1ef\x0f4\x03\x01"}, {"image/color", ""}, - {"image/color/palette", "\x8c\x01"}, - {"image/draw", "\x8b\x01\x01\x04"}, - {"image/gif", "\x02\x01\x05f\x03\x1a\x01\x01\x01\vX"}, - {"image/internal/imageutil", "\x8b\x01"}, - {"image/jpeg", "\x02l\x1d\x01\x04a"}, - {"image/png", "\x02\a^\n\x12\x02\x06\x01eC"}, - {"index/suffixarray", "\x03d\a\x83\x01\f+\n\x01"}, - {"internal/abi", "\xb5\x01\x96\x01"}, - {"internal/asan", "\xcb\x02"}, - {"internal/bisect", "\xaa\x02\r\x01"}, - {"internal/buildcfg", "qGe\x06\x02\x05\n\x01"}, - {"internal/bytealg", "\xae\x01\x9d\x01"}, + {"image/color/palette", "\x8f\x01"}, + {"image/draw", "\x8e\x01\x01\x04"}, + {"image/gif", "\x02\x01\x05i\x03\x1a\x01\x01\x01\vY"}, + {"image/internal/imageutil", "\x8e\x01"}, + {"image/jpeg", "\x02o\x1d\x01\x04b"}, + {"image/png", "\x02\aa\n\x12\x02\x06\x01fC"}, + {"index/suffixarray", "\x03g\a\x84\x01\f+\n\x01"}, + {"internal/abi", "\xb8\x01\x97\x01"}, + {"internal/asan", "\xcf\x02"}, + {"internal/bisect", "\xae\x02\r\x01"}, + {"internal/buildcfg", "tGf\x06\x02\x05\n\x01"}, + {"internal/bytealg", "\xb1\x01\x9e\x01"}, {"internal/byteorder", ""}, {"internal/cfg", ""}, - {"internal/cgrouptest", "q[Q\x06\x0f\x02\x01\x04\x01"}, - {"internal/chacha8rand", "\x99\x01\x15\a\x96\x01"}, + {"internal/cgrouptest", "tZS\x06\x0f\x02\x01\x04\x01"}, + {"internal/chacha8rand", "\x9c\x01\x15\a\x97\x01"}, {"internal/copyright", ""}, {"internal/coverage", ""}, {"internal/coverage/calloc", ""}, - {"internal/coverage/cfile", "k\x06\x16\x17\x01\x02\x01\x01\x01\x01\x01\x01\x01#\x02$,\x06\a\n\x01\x03\r\x06"}, - {"internal/coverage/cformat", "\x04m-\x04O\v6\x01\x02\r"}, - {"internal/coverage/cmerge", "q-_"}, - {"internal/coverage/decodecounter", "g\n-\v\x02F,\x17\x17"}, - 
{"internal/coverage/decodemeta", "\x02e\n\x16\x17\v\x02F,"}, - {"internal/coverage/encodecounter", "\x02e\n-\f\x01\x02D\v!\x15"}, - {"internal/coverage/encodemeta", "\x02\x01d\n\x12\x04\x17\r\x02D,."}, - {"internal/coverage/pods", "\x04m-\x7f\x06\x05\n\x02\x01"}, - {"internal/coverage/rtcov", "\xcb\x02"}, - {"internal/coverage/slicereader", "g\n\x80\x01Z"}, - {"internal/coverage/slicewriter", "q\x80\x01"}, - {"internal/coverage/stringtab", "q8\x04D"}, + {"internal/coverage/cfile", "n\x06\x16\x17\x01\x02\x01\x01\x01\x01\x01\x01\x01\"\x02&,\x06\a\n\x01\x03\r\x06"}, + {"internal/coverage/cformat", "\x04p-\x04P\v6\x01\x02\r"}, + {"internal/coverage/cmerge", "t-`"}, + {"internal/coverage/decodecounter", "j\n-\v\x02G,\x17\x17"}, + {"internal/coverage/decodemeta", "\x02h\n\x16\x17\v\x02G,"}, + {"internal/coverage/encodecounter", "\x02h\n-\f\x01\x02E\v!\x15"}, + {"internal/coverage/encodemeta", "\x02\x01g\n\x12\x04\x17\r\x02E,."}, + {"internal/coverage/pods", "\x04p-\x80\x01\x06\x05\n\x02\x01"}, + {"internal/coverage/rtcov", "\xcf\x02"}, + {"internal/coverage/slicereader", "j\n\x81\x01Z"}, + {"internal/coverage/slicewriter", "t\x81\x01"}, + {"internal/coverage/stringtab", "t8\x04E"}, {"internal/coverage/test", ""}, {"internal/coverage/uleb128", ""}, - {"internal/cpu", "\xcb\x02"}, - {"internal/dag", "\x04m\xc1\x01\x03"}, - {"internal/diff", "\x03n\xc2\x01\x02"}, - {"internal/exportdata", "\x02\x01k\x03\x02c\x1b,\x01\x05\x11\x01\x02"}, - {"internal/filepathlite", "n*@\x1a@"}, - {"internal/fmtsort", "\x04\xa1\x02\r"}, - {"internal/fuzz", "\x03\nB\x18\x04\x03\x03\x01\v\x036;\f\x03\x1d\x01\x05\x02\x05\n\x01\x02\x01\x01\f\x04\x02"}, + {"internal/cpu", "\xcf\x02"}, + {"internal/dag", "\x04p\xc2\x01\x03"}, + {"internal/diff", "\x03q\xc3\x01\x02"}, + {"internal/exportdata", "\x02\x01n\x03\x02c\x1c,\x01\x05\x11\x01\x02"}, + {"internal/filepathlite", "q*A\x1a@"}, + {"internal/fmtsort", "\x04\xa5\x02\r"}, + {"internal/fuzz", 
"\x03\nE\x18\x04\x03\x03\x01\v\x036<\f\x03\x1d\x01\x05\x02\x05\n\x01\x02\x01\x01\f\x04\x02"}, {"internal/goarch", ""}, - {"internal/godebug", "\x96\x01!\x80\x01\x01\x13"}, + {"internal/godebug", "\x99\x01!\x81\x01\x01\x13"}, {"internal/godebugs", ""}, {"internal/goexperiment", ""}, {"internal/goos", ""}, - {"internal/goroot", "\x9d\x02\x01\x05\x12\x02"}, + {"internal/goroot", "\xa1\x02\x01\x05\x12\x02"}, {"internal/gover", "\x04"}, {"internal/goversion", ""}, - {"internal/itoa", ""}, - {"internal/lazyregexp", "\x9d\x02\v\r\x02"}, - {"internal/lazytemplate", "\xf1\x01,\x18\x02\f"}, - {"internal/msan", "\xcb\x02"}, + {"internal/lazyregexp", "\xa1\x02\v\r\x02"}, + {"internal/lazytemplate", "\xf5\x01,\x18\x02\f"}, + {"internal/msan", "\xcf\x02"}, {"internal/nettrace", ""}, - {"internal/obscuretestdata", "f\x8b\x01,"}, - {"internal/oserror", "n"}, - {"internal/pkgbits", "\x03L\x18\a\x03\x04\vq\r\x1f\r\n\x01"}, + {"internal/obscuretestdata", "i\x8c\x01,"}, + {"internal/oserror", "q"}, + {"internal/pkgbits", "\x03O\x18\a\x03\x04\vr\r\x1f\r\n\x01"}, {"internal/platform", ""}, - {"internal/poll", "nO\x1f\x159\r\x01\x01\f\x06"}, - {"internal/profile", "\x03\x04g\x03\x80\x017\v\x01\x01\x10"}, + {"internal/poll", "qj\x05\x159\r\x01\x01\f\x06"}, + {"internal/profile", "\x03\x04j\x03\x81\x017\n\x01\x01\x01\x10"}, {"internal/profilerecord", ""}, - {"internal/race", "\x94\x01\xb7\x01"}, - {"internal/reflectlite", "\x94\x01!9\b\x13\x01\a\x03E;\x01\x03\a\x01\x03\x02\x02\x01\x02\x06\x02\x01\x01\n\x01\x01\x05\x01\x02\x05\b\x01\x01\x01\x02\x01\r\x02\x02\x02\b\x01\x01\x01"}, - {"net/http/cgi", "\x02Q\x1b\x03\x80\x01\x04\a\v\x01\x13\x01\x01\x01\x04\x01\x05\x02\b\x02\x01\x10\x0e"}, - {"net/http/cookiejar", "\x04j\x03\x96\x01\x01\b\f\x16\x03\x02\x0e\x04"}, - {"net/http/fcgi", "\x02\x01\nZ\a\x03\x80\x01\x16\x01\x01\x14\x18\x02\x0e"}, - {"net/http/httptest", "\x02\x01\nF\x02\x1b\x01\x80\x01\x04\x12\x01\n\t\x02\x17\x01\x02\x0e\x0e"}, - {"net/http/httptrace", "\rFnF\x14\n "}, - 
{"net/http/httputil", "\x02\x01\na\x03\x80\x01\x04\x0f\x03\x01\x05\x02\x01\v\x01\x19\x02\x0e\x0e"}, - {"net/http/internal", "\x02\x01k\x03\x80\x01"}, - {"net/http/internal/ascii", "\xb5\x02\x12"}, - {"net/http/internal/httpcommon", "\ra\x03\x9c\x01\x0e\x01\x17\x01\x01\x02\x1c\x02"}, - {"net/http/internal/testcert", "\xb5\x02"}, - {"net/http/pprof", "\x02\x01\nd\x18-\x11*\x04\x13\x14\x01\r\x04\x03\x01\x02\x01\x10"}, + {"log/slog/internal/benchmarks", "\rd\x03\x81\x01\x06\x03:\x11"}, + {"log/slog/internal/buffer", "\xbb\x02"}, + {"log/syslog", "q\x03\x85\x01\x12\x16\x18\x02\x0e"}, + {"maps", "\xf8\x01W"}, + {"math", "\xb1\x01SK"}, + {"math/big", "\x03n\x03(\x15D\f\x03\x020\x02\x01\x02\x14"}, + {"math/big/internal/asmgen", "\x03\x01p\x90\x012\x03"}, + {"math/bits", "\xcf\x02"}, + {"math/cmplx", "\x81\x02\x03"}, + {"math/rand", "\xb9\x01H:\x01\x13"}, + {"math/rand/v2", "q+\x03b\x03K"}, + {"mime", "\x02\x01f\b\x03\x81\x01\v!\x15\x03\x02\x10\x02"}, + {"mime/multipart", "\x02\x01K#\x03E<\v\x01\a\x02\x15\x02\x06\x0f\x02\x01\x16"}, + {"mime/quotedprintable", "\x02\x01q\x81\x01"}, + {"net", "\x04\td*\x1e\n\x05\x12\x01\x01\x04\x15\x01%\x06\r\b\x05\x01\x01\f\x06\a"}, + {"net/http", "\x02\x01\x03\x01\x04\x02A\b\x13\x01\a\x03E<\x01\x03\a\x01\x03\x02\x02\x01\x02\x06\x02\x01\x01\n\x01\x01\x05\x01\x02\x05\b\x01\x01\x01\x02\x01\r\x02\x02\x02\b\x01\x01\x01"}, + {"net/http/cgi", "\x02T\x1b\x03\x81\x01\x04\a\v\x01\x13\x01\x01\x01\x04\x01\x05\x02\b\x02\x01\x10\x0e"}, + {"net/http/cookiejar", "\x04m\x03\x97\x01\x01\b\f\x16\x03\x02\x0e\x04"}, + {"net/http/fcgi", "\x02\x01\n]\a\x03\x81\x01\x16\x01\x01\x14\x18\x02\x0e"}, + {"net/http/httptest", "\x02\x01\nI\x02\x1b\x01\x81\x01\x04\x12\x01\n\t\x02\x17\x01\x02\x0e\x0e"}, + {"net/http/httptrace", "\rImH\x14\n "}, + {"net/http/httputil", "\x02\x01\nd\x03\x81\x01\x04\x0f\x03\x01\x05\x02\x01\v\x01\x19\x02\x0e\x0e"}, + {"net/http/internal", "\x02\x01n\x03\x81\x01"}, + {"net/http/internal/ascii", "\xb9\x02\x12"}, + {"net/http/internal/httpcommon", 
"\rd\x03\x9d\x01\x0e\x01\x17\x01\x01\x02\x1c\x02"}, + {"net/http/internal/testcert", "\xb9\x02"}, + {"net/http/pprof", "\x02\x01\ng\x18-\x02\x0e,\x04\x13\x14\x01\r\x04\x03\x01\x02\x01\x10"}, {"net/internal/cgotest", ""}, - {"net/internal/socktest", "q\xc6\x01\x02"}, - {"net/mail", "\x02l\x03\x80\x01\x04\x0f\x03\x14\x1a\x02\x0e\x04"}, - {"net/netip", "\x04j*\x01$@\x034\x16"}, - {"net/rpc", "\x02g\x05\x03\x0f\ng\x04\x12\x01\x1d\r\x03\x02"}, - {"net/rpc/jsonrpc", "k\x03\x03\x80\x01\x16\x11\x1f"}, - {"net/smtp", "\x19/\v\x13\b\x03\x80\x01\x16\x14\x1a"}, - {"net/textproto", "\x02\x01k\x03\x80\x01\f\n-\x01\x02\x14"}, - {"net/url", "n\x03\x8b\x01&\x10\x02\x01\x16"}, - {"os", "n*\x01\x19\x03\b\t\x12\x03\x01\x05\x10\x018\b\x05\x01\x01\f\x06"}, - {"os/exec", "\x03\naH%\x01\x15\x01+\x06\a\n\x01\x04\f"}, - {"os/exec/internal/fdtest", "\xb9\x02"}, - {"os/signal", "\r\x90\x02\x15\x05\x02"}, - {"os/user", "\x02\x01k\x03\x80\x01,\r\n\x01\x02"}, - {"path", "n*\xb1\x01"}, - {"path/filepath", "n*\x1a@+\r\b\x03\x04\x10"}, - {"plugin", "n"}, - {"reflect", "n&\x04\x1d\b\f\x06\x04\x1b\x06\t-\n\x03\x10\x02\x02"}, + {"net/internal/socktest", "t\xc7\x01\x02"}, + {"net/mail", "\x02o\x03\x81\x01\x04\x0f\x03\x14\x1a\x02\x0e\x04"}, + {"net/netip", "\x04m*\x01e\x034\x16"}, + {"net/rpc", "\x02j\x05\x03\x0f\nh\x04\x12\x01\x1d\r\x03\x02"}, + {"net/rpc/jsonrpc", "n\x03\x03\x81\x01\x16\x11\x1f"}, + {"net/smtp", "\x192\v\x13\b\x03\x81\x01\x16\x14\x1a"}, + {"net/textproto", "\x02\x01n\x03\x81\x01\f\n-\x01\x02\x14"}, + {"net/url", "q\x03\xa7\x01\v\x10\x02\x01\x16"}, + {"os", "q*\x01\x19\x03\x10\x13\x01\x03\x01\x05\x10\x018\b\x05\x01\x01\f\x06"}, + {"os/exec", "\x03\ndH&\x01\x15\x01+\x06\a\n\x01\x04\f"}, + {"os/exec/internal/fdtest", "\xbd\x02"}, + {"os/signal", "\r\x94\x02\x15\x05\x02"}, + {"os/user", "\x02\x01n\x03\x81\x01,\r\n\x01\x02"}, + {"path", "q*\xb2\x01"}, + {"path/filepath", "q*\x1aA+\r\b\x03\x04\x10"}, + {"plugin", "q"}, + {"reflect", "q&\x04\x1d\x13\b\x03\x05\x17\x06\t-\n\x03\x10\x02\x02"}, 
{"reflect/internal/example1", ""}, {"reflect/internal/example2", ""}, - {"regexp", "\x03\xee\x018\t\x02\x01\x02\x10\x02"}, - {"regexp/syntax", "\xb2\x02\x01\x01\x01\x02\x10\x02"}, - {"runtime", "\x94\x01\x04\x01\x03\f\x06\a\x02\x01\x01\x0f\x03\x01\x01\x01\x01\x01\x02\x01\x01\x04\x10c"}, - {"runtime/coverage", "\xa0\x01Q"}, - {"runtime/debug", "qUW\r\b\x02\x01\x10\x06"}, - {"runtime/metrics", "\xb7\x01F-!"}, - {"runtime/pprof", "\x02\x01\x01\x03\x06Z\a\x03#4)\f \r\b\x01\x01\x01\x02\x02\t\x03\x06"}, - {"runtime/race", "\xb0\x02"}, + {"regexp", "\x03\xf2\x018\t\x02\x01\x02\x10\x02"}, + {"regexp/syntax", "\xb6\x02\x01\x01\x01\x02\x10\x02"}, + {"runtime", "\x97\x01\x04\x01\x03\f\x06\a\x02\x01\x01\x0e\x03\x01\x01\x01\x02\x01\x01\x02\x01\x04\x01\x10c"}, + {"runtime/coverage", "\xa3\x01R"}, + {"runtime/debug", "tTY\r\b\x02\x01\x10\x06"}, + {"runtime/metrics", "\xba\x01G-!"}, + {"runtime/pprof", "\x02\x01\x01\x03\x06]\a\x03#$\x0f+\f \r\b\x01\x01\x01\x02\x02\t\x03\x06"}, + {"runtime/race", "\xb4\x02"}, {"runtime/race/internal/amd64v1", ""}, - {"runtime/trace", "\ra\x03w\t9\b\x05\x01\r\x06"}, - {"slices", "\x04\xf0\x01\fK"}, - {"sort", "\xca\x0162"}, - {"strconv", "n*@%\x03I"}, - {"strings", "n&\x04@\x19\x03\f7\x10\x02\x02"}, + {"runtime/trace", "\rd\x03x\t9\b\x05\x01\r\x06"}, + {"slices", "\x04\xf4\x01\fK"}, + {"sort", "\xcc\x0182"}, + {"strconv", "q*@\x01q"}, + {"strings", "q&\x04A\x19\x03\f7\x10\x02\x02"}, {"structs", ""}, - {"sync", "\xc9\x01\x10\x01P\x0e\x13"}, - {"sync/atomic", "\xcb\x02"}, - {"syscall", "n'\x03\x01\x1c\b\x03\x03\x06\vV\b\x05\x01\x13"}, - {"testing", "\x03\na\x02\x01X\x14\x14\f\x05\x1b\x06\x02\x05\x02\x05\x01\x02\x01\x02\x01\r\x02\x02\x02"}, - {"testing/fstest", "n\x03\x80\x01\x01\n&\x10\x03\b\b"}, - {"testing/internal/testdeps", "\x02\v\xa7\x01-\x10,\x03\x05\x03\x06\a\x02\x0e"}, - {"testing/iotest", "\x03k\x03\x80\x01\x04"}, - {"testing/quick", "p\x01\x8c\x01\x05#\x10\x10"}, - {"testing/slogtest", "\ra\x03\x86\x01.\x05\x10\v"}, - {"testing/synctest", 
"\xda\x01`\x11"}, - {"text/scanner", "\x03n\x80\x01,*\x02"}, - {"text/tabwriter", "q\x80\x01X"}, - {"text/template", "n\x03B>\x01\n \x01\x05\x01\x02\x05\v\x02\r\x03\x02"}, - {"text/template/parse", "\x03n\xb9\x01\n\x01\x12\x02"}, - {"time", "n*\x1e\"(*\r\x02\x12"}, - {"time/tzdata", "n\xcb\x01\x12"}, + {"sync", "\xcb\x01\x12\x01P\x0e\x13"}, + {"sync/atomic", "\xcf\x02"}, + {"syscall", "q'\x03\x01\x1c\n\x03\x06\f\x04S\b\x05\x01\x13"}, + {"testing", "\x03\nd\x02\x01W\x16\x14\f\x05\x1b\x06\x02\x05\x02\x05\x01\x02\x01\x02\x01\r\x02\x04"}, + {"testing/fstest", "q\x03\x81\x01\x01\n&\x10\x03\b\b"}, + {"testing/internal/testdeps", "\x02\v\xaa\x01.\x10,\x03\x05\x03\x06\a\x02\x0e"}, + {"testing/iotest", "\x03n\x03\x81\x01\x04"}, + {"testing/quick", "s\x01\x8d\x01\x05#\x10\x10"}, + {"testing/slogtest", "\rd\x03\x87\x01.\x05\x10\v"}, + {"testing/synctest", "\xde\x01`\x11"}, + {"text/scanner", "\x03q\x81\x01,*\x02"}, + {"text/tabwriter", "t\x81\x01X"}, + {"text/template", "q\x03B?\x01\n \x01\x05\x01\x02\x05\v\x02\r\x03\x02"}, + {"text/template/parse", "\x03q\xba\x01\n\x01\x12\x02"}, + {"time", "q*\x1e#(*\r\x02\x12"}, + {"time/tzdata", "q\xcc\x01\x12"}, {"unicode", ""}, {"unicode/utf16", ""}, {"unicode/utf8", ""}, - {"unique", "\x94\x01!#\x01Q\r\x01\x13\x12"}, + {"unique", "\x97\x01!$\x01Q\r\x01\x13\x12"}, {"unsafe", ""}, - {"vendor/golang.org/x/crypto/chacha20", "\x10W\a\x92\x01*&"}, - {"vendor/golang.org/x/crypto/chacha20poly1305", "\x10W\a\xde\x01\x04\x01\a"}, - {"vendor/golang.org/x/crypto/cryptobyte", "d\n\x03\x8d\x01' \n"}, + {"vendor/golang.org/x/crypto/chacha20", "\x10Z\a\x93\x01*&"}, + {"vendor/golang.org/x/crypto/chacha20poly1305", "\x10Z\a\xdf\x01\x04\x01\a"}, + {"vendor/golang.org/x/crypto/cryptobyte", "g\n\x03\x8e\x01' \n"}, {"vendor/golang.org/x/crypto/cryptobyte/asn1", ""}, - {"vendor/golang.org/x/crypto/internal/alias", "\xcb\x02"}, - {"vendor/golang.org/x/crypto/internal/poly1305", "R\x15\x99\x01"}, - {"vendor/golang.org/x/net/dns/dnsmessage", "n"}, - 
{"vendor/golang.org/x/net/http/httpguts", "\x87\x02\x14\x1a\x14\r"}, - {"vendor/golang.org/x/net/http/httpproxy", "n\x03\x96\x01\x10\x05\x01\x18\x14\r"}, - {"vendor/golang.org/x/net/http2/hpack", "\x03k\x03\x80\x01F"}, - {"vendor/golang.org/x/net/idna", "q\x8c\x018\x14\x10\x02\x01"}, - {"vendor/golang.org/x/net/nettest", "\x03d\a\x03\x80\x01\x11\x05\x16\x01\f\n\x01\x02\x02\x01\v"}, - {"vendor/golang.org/x/sys/cpu", "\x9d\x02\r\n\x01\x16"}, - {"vendor/golang.org/x/text/secure/bidirule", "n\xdb\x01\x11\x01"}, - {"vendor/golang.org/x/text/transform", "\x03k\x83\x01X"}, - {"vendor/golang.org/x/text/unicode/bidi", "\x03\bf\x84\x01>\x16"}, - {"vendor/golang.org/x/text/unicode/norm", "g\n\x80\x01F\x12\x11"}, - {"weak", "\x94\x01\x96\x01!"}, + {"vendor/golang.org/x/crypto/internal/alias", "\xcf\x02"}, + {"vendor/golang.org/x/crypto/internal/poly1305", "U\x15\x9a\x01"}, + {"vendor/golang.org/x/net/dns/dnsmessage", "q"}, + {"vendor/golang.org/x/net/http/httpguts", "\x8b\x02\x14\x1a\x14\r"}, + {"vendor/golang.org/x/net/http/httpproxy", "q\x03\x97\x01\x10\x05\x01\x18\x14\r"}, + {"vendor/golang.org/x/net/http2/hpack", "\x03n\x03\x81\x01F"}, + {"vendor/golang.org/x/net/idna", "t\x8d\x018\x14\x10\x02\x01"}, + {"vendor/golang.org/x/net/nettest", "\x03g\a\x03\x81\x01\x11\x05\x16\x01\f\n\x01\x02\x02\x01\v"}, + {"vendor/golang.org/x/sys/cpu", "\xa1\x02\r\n\x01\x16"}, + {"vendor/golang.org/x/text/secure/bidirule", "q\xdc\x01\x11\x01"}, + {"vendor/golang.org/x/text/transform", "\x03n\x84\x01X"}, + {"vendor/golang.org/x/text/unicode/bidi", "\x03\bi\x85\x01>\x16"}, + {"vendor/golang.org/x/text/unicode/norm", "j\n\x81\x01F\x12\x11"}, + {"weak", "\x97\x01\x97\x01!"}, } + +// bootstrap is the list of bootstrap packages extracted from cmd/dist. 
+var bootstrap = map[string]bool{ + "cmp": true, + "cmd/asm": true, + "cmd/asm/internal/arch": true, + "cmd/asm/internal/asm": true, + "cmd/asm/internal/flags": true, + "cmd/asm/internal/lex": true, + "cmd/cgo": true, + "cmd/compile": true, + "cmd/compile/internal/abi": true, + "cmd/compile/internal/abt": true, + "cmd/compile/internal/amd64": true, + "cmd/compile/internal/arm": true, + "cmd/compile/internal/arm64": true, + "cmd/compile/internal/base": true, + "cmd/compile/internal/bitvec": true, + "cmd/compile/internal/compare": true, + "cmd/compile/internal/coverage": true, + "cmd/compile/internal/deadlocals": true, + "cmd/compile/internal/devirtualize": true, + "cmd/compile/internal/dwarfgen": true, + "cmd/compile/internal/escape": true, + "cmd/compile/internal/gc": true, + "cmd/compile/internal/importer": true, + "cmd/compile/internal/inline": true, + "cmd/compile/internal/inline/inlheur": true, + "cmd/compile/internal/inline/interleaved": true, + "cmd/compile/internal/ir": true, + "cmd/compile/internal/liveness": true, + "cmd/compile/internal/logopt": true, + "cmd/compile/internal/loong64": true, + "cmd/compile/internal/loopvar": true, + "cmd/compile/internal/mips": true, + "cmd/compile/internal/mips64": true, + "cmd/compile/internal/noder": true, + "cmd/compile/internal/objw": true, + "cmd/compile/internal/pgoir": true, + "cmd/compile/internal/pkginit": true, + "cmd/compile/internal/ppc64": true, + "cmd/compile/internal/rangefunc": true, + "cmd/compile/internal/reflectdata": true, + "cmd/compile/internal/riscv64": true, + "cmd/compile/internal/rttype": true, + "cmd/compile/internal/s390x": true, + "cmd/compile/internal/ssa": true, + "cmd/compile/internal/ssagen": true, + "cmd/compile/internal/staticdata": true, + "cmd/compile/internal/staticinit": true, + "cmd/compile/internal/syntax": true, + "cmd/compile/internal/test": true, + "cmd/compile/internal/typebits": true, + "cmd/compile/internal/typecheck": true, + "cmd/compile/internal/types": true, + 
"cmd/compile/internal/types2": true, + "cmd/compile/internal/walk": true, + "cmd/compile/internal/wasm": true, + "cmd/compile/internal/x86": true, + "cmd/internal/archive": true, + "cmd/internal/bio": true, + "cmd/internal/codesign": true, + "cmd/internal/dwarf": true, + "cmd/internal/edit": true, + "cmd/internal/gcprog": true, + "cmd/internal/goobj": true, + "cmd/internal/hash": true, + "cmd/internal/macho": true, + "cmd/internal/obj": true, + "cmd/internal/obj/arm": true, + "cmd/internal/obj/arm64": true, + "cmd/internal/obj/loong64": true, + "cmd/internal/obj/mips": true, + "cmd/internal/obj/ppc64": true, + "cmd/internal/obj/riscv": true, + "cmd/internal/obj/s390x": true, + "cmd/internal/obj/wasm": true, + "cmd/internal/obj/x86": true, + "cmd/internal/objabi": true, + "cmd/internal/par": true, + "cmd/internal/pgo": true, + "cmd/internal/pkgpath": true, + "cmd/internal/quoted": true, + "cmd/internal/src": true, + "cmd/internal/sys": true, + "cmd/internal/telemetry": true, + "cmd/internal/telemetry/counter": true, + "cmd/link": true, + "cmd/link/internal/amd64": true, + "cmd/link/internal/arm": true, + "cmd/link/internal/arm64": true, + "cmd/link/internal/benchmark": true, + "cmd/link/internal/dwtest": true, + "cmd/link/internal/ld": true, + "cmd/link/internal/loadelf": true, + "cmd/link/internal/loader": true, + "cmd/link/internal/loadmacho": true, + "cmd/link/internal/loadpe": true, + "cmd/link/internal/loadxcoff": true, + "cmd/link/internal/loong64": true, + "cmd/link/internal/mips": true, + "cmd/link/internal/mips64": true, + "cmd/link/internal/ppc64": true, + "cmd/link/internal/riscv64": true, + "cmd/link/internal/s390x": true, + "cmd/link/internal/sym": true, + "cmd/link/internal/wasm": true, + "cmd/link/internal/x86": true, + "compress/flate": true, + "compress/zlib": true, + "container/heap": true, + "debug/dwarf": true, + "debug/elf": true, + "debug/macho": true, + "debug/pe": true, + "go/build/constraint": true, + "go/constant": true, + "go/version": 
true, + "internal/abi": true, + "internal/coverage": true, + "cmd/internal/cov/covcmd": true, + "internal/bisect": true, + "internal/buildcfg": true, + "internal/exportdata": true, + "internal/goarch": true, + "internal/godebugs": true, + "internal/goexperiment": true, + "internal/goroot": true, + "internal/gover": true, + "internal/goversion": true, + "internal/lazyregexp": true, + "internal/pkgbits": true, + "internal/platform": true, + "internal/profile": true, + "internal/race": true, + "internal/runtime/gc": true, + "internal/saferio": true, + "internal/syscall/unix": true, + "internal/types/errors": true, + "internal/unsafeheader": true, + "internal/xcoff": true, + "internal/zstd": true, + "math/bits": true, + "sort": true, +} + +// BootstrapVersion is the minor version of Go used during toolchain +// bootstrapping. Packages for which [IsBootstrapPackage] must not use +// features of Go newer than this version. +const BootstrapVersion = Version(24) // go1.24.6 diff --git a/vendor/golang.org/x/tools/internal/stdlib/import.go b/vendor/golang.org/x/tools/internal/stdlib/import.go index f6909878a8a..8ecc672b8b5 100644 --- a/vendor/golang.org/x/tools/internal/stdlib/import.go +++ b/vendor/golang.org/x/tools/internal/stdlib/import.go @@ -87,3 +87,11 @@ func find(pkg string) (int, bool) { return strings.Compare(p.name, n) }) } + +// IsBootstrapPackage reports whether pkg is one of the low-level +// packages in the Go distribution that must compile with the older +// language version specified by [BootstrapVersion] during toolchain +// bootstrapping; see golang.org/s/go15bootstrap. 
+func IsBootstrapPackage(pkg string) bool { + return bootstrap[pkg] +} diff --git a/vendor/golang.org/x/tools/internal/stdlib/manifest.go b/vendor/golang.org/x/tools/internal/stdlib/manifest.go index c1faa50d367..362f23c436c 100644 --- a/vendor/golang.org/x/tools/internal/stdlib/manifest.go +++ b/vendor/golang.org/x/tools/internal/stdlib/manifest.go @@ -225,6 +225,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*Buffer).Grow", Method, 1, ""}, {"(*Buffer).Len", Method, 0, ""}, {"(*Buffer).Next", Method, 0, ""}, + {"(*Buffer).Peek", Method, 26, ""}, {"(*Buffer).Read", Method, 0, ""}, {"(*Buffer).ReadByte", Method, 0, ""}, {"(*Buffer).ReadBytes", Method, 0, ""}, @@ -1628,6 +1629,7 @@ var PackageSymbols = map[string][]Symbol{ {"ResultNoRows", Var, 0, ""}, {"Rows", Type, 0, ""}, {"RowsAffected", Type, 0, ""}, + {"RowsColumnScanner", Type, 26, ""}, {"RowsColumnTypeDatabaseTypeName", Type, 8, ""}, {"RowsColumnTypeLength", Type, 8, ""}, {"RowsColumnTypeNullable", Type, 8, ""}, @@ -4953,6 +4955,7 @@ var PackageSymbols = map[string][]Symbol{ }, "errors": { {"As", Func, 13, "func(err error, target any) bool"}, + {"AsType", Func, 26, "func[E error](err error) (E, bool)"}, {"ErrUnsupported", Var, 21, ""}, {"Is", Func, 13, "func(err error, target error) bool"}, {"Join", Func, 20, "func(errs ...error) error"}, @@ -5090,7 +5093,7 @@ var PackageSymbols = map[string][]Symbol{ {"Append", Func, 19, "func(b []byte, a ...any) []byte"}, {"Appendf", Func, 19, "func(b []byte, format string, a ...any) []byte"}, {"Appendln", Func, 19, "func(b []byte, a ...any) []byte"}, - {"Errorf", Func, 0, "func(format string, a ...any) error"}, + {"Errorf", Func, 0, "func(format string, a ...any) (err error)"}, {"FormatString", Func, 20, "func(state State, verb rune) string"}, {"Formatter", Type, 0, ""}, {"Fprint", Func, 0, "func(w io.Writer, a ...any) (n int, err error)"}, @@ -5155,6 +5158,9 @@ var PackageSymbols = map[string][]Symbol{ {"(*DeclStmt).Pos", Method, 0, ""}, {"(*DeferStmt).End", Method, 0, 
""}, {"(*DeferStmt).Pos", Method, 0, ""}, + {"(*Directive).End", Method, 26, ""}, + {"(*Directive).ParseArgs", Method, 26, ""}, + {"(*Directive).Pos", Method, 26, ""}, {"(*Ellipsis).End", Method, 0, ""}, {"(*Ellipsis).Pos", Method, 0, ""}, {"(*EmptyStmt).End", Method, 0, ""}, @@ -5320,6 +5326,15 @@ var PackageSymbols = map[string][]Symbol{ {"DeferStmt", Type, 0, ""}, {"DeferStmt.Call", Field, 0, ""}, {"DeferStmt.Defer", Field, 0, ""}, + {"Directive", Type, 26, ""}, + {"Directive.Args", Field, 26, ""}, + {"Directive.ArgsPos", Field, 26, ""}, + {"Directive.Name", Field, 26, ""}, + {"Directive.Slash", Field, 26, ""}, + {"Directive.Tool", Field, 26, ""}, + {"DirectiveArg", Type, 26, ""}, + {"DirectiveArg.Arg", Field, 26, ""}, + {"DirectiveArg.Pos", Field, 26, ""}, {"Ellipsis", Type, 0, ""}, {"Ellipsis.Ellipsis", Field, 0, ""}, {"Ellipsis.Elt", Field, 0, ""}, @@ -5469,6 +5484,7 @@ var PackageSymbols = map[string][]Symbol{ {"ParenExpr.Lparen", Field, 0, ""}, {"ParenExpr.Rparen", Field, 0, ""}, {"ParenExpr.X", Field, 0, ""}, + {"ParseDirective", Func, 26, "func(pos token.Pos, c string) (Directive, bool)"}, {"Pkg", Const, 0, ""}, {"Preorder", Func, 23, "func(root Node) iter.Seq[Node]"}, {"PreorderStack", Func, 25, "func(root Node, stack []Node, f func(n Node, stack []Node) bool)"}, @@ -7271,6 +7287,10 @@ var PackageSymbols = map[string][]Symbol{ {"(*Logger).WarnContext", Method, 21, ""}, {"(*Logger).With", Method, 21, ""}, {"(*Logger).WithGroup", Method, 21, ""}, + {"(*MultiHandler).Enabled", Method, 26, ""}, + {"(*MultiHandler).Handle", Method, 26, ""}, + {"(*MultiHandler).WithAttrs", Method, 26, ""}, + {"(*MultiHandler).WithGroup", Method, 26, ""}, {"(*Record).Add", Method, 21, ""}, {"(*Record).AddAttrs", Method, 21, ""}, {"(*TextHandler).Enabled", Method, 21, ""}, @@ -7358,9 +7378,11 @@ var PackageSymbols = map[string][]Symbol{ {"LogValuer", Type, 21, ""}, {"Logger", Type, 21, ""}, {"MessageKey", Const, 21, ""}, + {"MultiHandler", Type, 26, ""}, {"New", Func, 21, 
"func(h Handler) *Logger"}, {"NewJSONHandler", Func, 21, "func(w io.Writer, opts *HandlerOptions) *JSONHandler"}, {"NewLogLogger", Func, 21, "func(h Handler, level Level) *log.Logger"}, + {"NewMultiHandler", Func, 26, "func(handlers ...Handler) *MultiHandler"}, {"NewRecord", Func, 21, "func(t time.Time, level Level, msg string, pc uintptr) Record"}, {"NewTextHandler", Func, 21, "func(w io.Writer, opts *HandlerOptions) *TextHandler"}, {"Record", Type, 21, ""}, @@ -7515,7 +7537,7 @@ var PackageSymbols = map[string][]Symbol{ {"MinInt64", Const, 0, ""}, {"MinInt8", Const, 0, ""}, {"Mod", Func, 0, "func(x float64, y float64) float64"}, - {"Modf", Func, 0, "func(f float64) (int float64, frac float64)"}, + {"Modf", Func, 0, "func(f float64) (integer float64, fractional float64)"}, {"NaN", Func, 0, "func() float64"}, {"Nextafter", Func, 0, "func(x float64, y float64) (r float64)"}, {"Nextafter32", Func, 4, "func(x float32, y float32) (r float32)"}, @@ -7972,6 +7994,10 @@ var PackageSymbols = map[string][]Symbol{ {"(*DNSError).Unwrap", Method, 23, ""}, {"(*Dialer).Dial", Method, 1, ""}, {"(*Dialer).DialContext", Method, 7, ""}, + {"(*Dialer).DialIP", Method, 26, ""}, + {"(*Dialer).DialTCP", Method, 26, ""}, + {"(*Dialer).DialUDP", Method, 26, ""}, + {"(*Dialer).DialUnix", Method, 26, ""}, {"(*Dialer).MultipathTCP", Method, 21, ""}, {"(*Dialer).SetMultipathTCP", Method, 21, ""}, {"(*IP).UnmarshalText", Method, 2, ""}, @@ -8457,6 +8483,7 @@ var PackageSymbols = map[string][]Symbol{ {"HTTP2Config.PermitProhibitedCipherSuites", Field, 24, ""}, {"HTTP2Config.PingTimeout", Field, 24, ""}, {"HTTP2Config.SendPingTimeout", Field, 24, ""}, + {"HTTP2Config.StrictMaxConcurrentRequests", Field, 26, ""}, {"HTTP2Config.WriteByteTimeout", Field, 24, ""}, {"Handle", Func, 0, "func(pattern string, handler Handler)"}, {"HandleFunc", Func, 0, "func(pattern string, handler func(ResponseWriter, *Request))"}, @@ -8904,6 +8931,7 @@ var PackageSymbols = map[string][]Symbol{ {"(Prefix).AppendText", 
Method, 24, ""}, {"(Prefix).AppendTo", Method, 18, ""}, {"(Prefix).Bits", Method, 18, ""}, + {"(Prefix).Compare", Method, 26, ""}, {"(Prefix).Contains", Method, 18, ""}, {"(Prefix).IsSingleIP", Method, 18, ""}, {"(Prefix).IsValid", Method, 18, ""}, @@ -9177,6 +9205,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*Process).Release", Method, 0, ""}, {"(*Process).Signal", Method, 0, ""}, {"(*Process).Wait", Method, 0, ""}, + {"(*Process).WithHandle", Method, 26, ""}, {"(*ProcessState).ExitCode", Method, 12, ""}, {"(*ProcessState).Exited", Method, 0, ""}, {"(*ProcessState).Pid", Method, 0, ""}, @@ -9234,6 +9263,7 @@ var PackageSymbols = map[string][]Symbol{ {"ErrExist", Var, 0, ""}, {"ErrInvalid", Var, 0, ""}, {"ErrNoDeadline", Var, 10, ""}, + {"ErrNoHandle", Var, 26, ""}, {"ErrNotExist", Var, 0, ""}, {"ErrPermission", Var, 0, ""}, {"ErrProcessDone", Var, 16, ""}, @@ -9461,7 +9491,7 @@ var PackageSymbols = map[string][]Symbol{ {"ListSeparator", Const, 0, ""}, {"Localize", Func, 23, "func(path string) (string, error)"}, {"Match", Func, 0, "func(pattern string, name string) (matched bool, err error)"}, - {"Rel", Func, 0, "func(basepath string, targpath string) (string, error)"}, + {"Rel", Func, 0, "func(basePath string, targPath string) (string, error)"}, {"Separator", Const, 0, ""}, {"SkipAll", Var, 20, ""}, {"SkipDir", Var, 0, ""}, @@ -9932,7 +9962,7 @@ var PackageSymbols = map[string][]Symbol{ {"PanicNilError", Type, 21, ""}, {"Pinner", Type, 21, ""}, {"ReadMemStats", Func, 0, "func(m *MemStats)"}, - {"ReadTrace", Func, 5, "func() []byte"}, + {"ReadTrace", Func, 5, "func() (buf []byte)"}, {"SetBlockProfileRate", Func, 1, "func(rate int)"}, {"SetCPUProfileRate", Func, 0, "func(hz int)"}, {"SetCgoTraceback", Func, 7, "func(version int, traceback unsafe.Pointer, context unsafe.Pointer, symbolizer unsafe.Pointer)"}, @@ -16679,6 +16709,7 @@ var PackageSymbols = map[string][]Symbol{ {"ValueOf", Func, 0, ""}, }, "testing": { + {"(*B).ArtifactDir", Method, 26, ""}, 
{"(*B).Attr", Method, 25, ""}, {"(*B).Chdir", Method, 24, ""}, {"(*B).Cleanup", Method, 14, ""}, @@ -16713,6 +16744,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*B).StopTimer", Method, 0, ""}, {"(*B).TempDir", Method, 15, ""}, {"(*F).Add", Method, 18, ""}, + {"(*F).ArtifactDir", Method, 26, ""}, {"(*F).Attr", Method, 25, ""}, {"(*F).Chdir", Method, 24, ""}, {"(*F).Cleanup", Method, 18, ""}, @@ -16738,6 +16770,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*F).TempDir", Method, 18, ""}, {"(*M).Run", Method, 4, ""}, {"(*PB).Next", Method, 3, ""}, + {"(*T).ArtifactDir", Method, 26, ""}, {"(*T).Attr", Method, 25, ""}, {"(*T).Chdir", Method, 24, ""}, {"(*T).Cleanup", Method, 14, ""}, diff --git a/vendor/golang.org/x/tools/internal/typeparams/normalize.go b/vendor/golang.org/x/tools/internal/typeparams/normalize.go index f49802b8ef7..8d13f12147f 100644 --- a/vendor/golang.org/x/tools/internal/typeparams/normalize.go +++ b/vendor/golang.org/x/tools/internal/typeparams/normalize.go @@ -160,8 +160,7 @@ func computeTermSetInternal(t types.Type, seen map[types.Type]*termSet, depth in // The term set of an interface is the intersection of the term sets of its // embedded types. tset.terms = allTermlist - for i := 0; i < u.NumEmbeddeds(); i++ { - embedded := u.EmbeddedType(i) + for embedded := range u.EmbeddedTypes() { if _, ok := embedded.Underlying().(*types.TypeParam); ok { return nil, fmt.Errorf("invalid embedded type %T", embedded) } @@ -174,8 +173,7 @@ func computeTermSetInternal(t types.Type, seen map[types.Type]*termSet, depth in case *types.Union: // The term set of a union is the union of term sets of its terms. 
tset.terms = nil - for i := 0; i < u.Len(); i++ { - t := u.Term(i) + for t := range u.Terms() { var terms termlist switch t.Type().Underlying().(type) { case *types.Interface: diff --git a/vendor/golang.org/x/tools/internal/typesinternal/element.go b/vendor/golang.org/x/tools/internal/typesinternal/element.go index 4957f021641..5fe4d8abcb5 100644 --- a/vendor/golang.org/x/tools/internal/typesinternal/element.go +++ b/vendor/golang.org/x/tools/internal/typesinternal/element.go @@ -35,8 +35,8 @@ func ForEachElement(rtypes *typeutil.Map, msets *typeutil.MethodSetCache, T type // Recursion over signatures of each method. tmset := msets.MethodSet(T) - for i := 0; i < tmset.Len(); i++ { - sig := tmset.At(i).Type().(*types.Signature) + for method := range tmset.Methods() { + sig := method.Type().(*types.Signature) // It is tempting to call visit(sig, false) // but, as noted in golang.org/cl/65450043, // the Signature.Recv field is ignored by diff --git a/vendor/golang.org/x/tools/internal/typesinternal/fx.go b/vendor/golang.org/x/tools/internal/typesinternal/fx.go index 93acff21701..c846a53d5fe 100644 --- a/vendor/golang.org/x/tools/internal/typesinternal/fx.go +++ b/vendor/golang.org/x/tools/internal/typesinternal/fx.go @@ -19,25 +19,46 @@ func NoEffects(info *types.Info, expr ast.Expr) bool { switch v := n.(type) { case nil, *ast.Ident, *ast.BasicLit, *ast.BinaryExpr, *ast.ParenExpr, *ast.SelectorExpr, *ast.IndexExpr, *ast.SliceExpr, *ast.TypeAssertExpr, - *ast.StarExpr, *ast.CompositeLit, *ast.ArrayType, *ast.StructType, - *ast.MapType, *ast.InterfaceType, *ast.KeyValueExpr: - // No effect + *ast.StarExpr, *ast.CompositeLit, + // non-expressions that may appear within expressions + *ast.KeyValueExpr, + *ast.FieldList, + *ast.Field, + *ast.Ellipsis, + *ast.IndexListExpr: + // No effect. + + case *ast.ArrayType, + *ast.StructType, + *ast.ChanType, + *ast.FuncType, + *ast.MapType, + *ast.InterfaceType: + // Type syntax: no effects, recursively. + // Prune descent. 
+ return false + case *ast.UnaryExpr: - // Channel send <-ch has effects + // Channel send <-ch has effects. if v.Op == token.ARROW { noEffects = false } - case *ast.CallExpr: - // Type conversion has no effects - if !info.Types[v.Fun].IsType() { - // TODO(adonovan): Add a case for built-in functions without side - // effects (by using callsPureBuiltin from tools/internal/refactor/inline) - noEffects = false + case *ast.CallExpr: + // Type conversion has no effects. + if !info.Types[v.Fun].IsType() { + if CallsPureBuiltin(info, v) { + // A call such as len(e) has no effects of its + // own, though the subexpression e might. + } else { + noEffects = false + } } + case *ast.FuncLit: // A FuncLit has no effects, but do not descend into it. return false + default: // All other expressions have effects noEffects = false @@ -47,3 +68,21 @@ func NoEffects(info *types.Info, expr ast.Expr) bool { }) return noEffects } + +// CallsPureBuiltin reports whether call is a call of a built-in +// function that is a pure computation over its operands (analogous to +// a + operator). Because it does not depend on program state, it may +// be evaluated at any point--though not necessarily at multiple +// points (consider new, make). 
+func CallsPureBuiltin(info *types.Info, call *ast.CallExpr) bool { + if id, ok := ast.Unparen(call.Fun).(*ast.Ident); ok { + if b, ok := info.ObjectOf(id).(*types.Builtin); ok { + switch b.Name() { + case "len", "cap", "complex", "imag", "real", "make", "new", "max", "min": + return true + } + // Not: append clear close copy delete panic print println recover + } + } + return false +} diff --git a/vendor/golang.org/x/tools/internal/typesinternal/isnamed.go b/vendor/golang.org/x/tools/internal/typesinternal/isnamed.go index f2affec4fba..e0d63c46c6a 100644 --- a/vendor/golang.org/x/tools/internal/typesinternal/isnamed.go +++ b/vendor/golang.org/x/tools/internal/typesinternal/isnamed.go @@ -48,7 +48,7 @@ func IsFunctionNamed(obj types.Object, pkgPath string, names ...string) bool { return ok && IsPackageLevel(obj) && f.Pkg().Path() == pkgPath && - f.Type().(*types.Signature).Recv() == nil && + f.Signature().Recv() == nil && slices.Contains(names, f.Name()) } @@ -60,7 +60,7 @@ func IsFunctionNamed(obj types.Object, pkgPath string, names ...string) bool { // which is important for the performance of syntax matching. func IsMethodNamed(obj types.Object, pkgPath string, typeName string, names ...string) bool { if fn, ok := obj.(*types.Func); ok { - if recv := fn.Type().(*types.Signature).Recv(); recv != nil { + if recv := fn.Signature().Recv(); recv != nil { _, T := ReceiverNamed(recv) return T != nil && IsTypeNamed(T, pkgPath, typeName) && diff --git a/vendor/golang.org/x/tools/internal/typesinternal/qualifier.go b/vendor/golang.org/x/tools/internal/typesinternal/qualifier.go index 64f47919f02..4e2756fc491 100644 --- a/vendor/golang.org/x/tools/internal/typesinternal/qualifier.go +++ b/vendor/golang.org/x/tools/internal/typesinternal/qualifier.go @@ -19,7 +19,7 @@ import ( // TODO(adonovan): this function ignores the effect of shadowing. 
It // should accept a [token.Pos] and a [types.Info] and compute only the // set of imports that are not shadowed at that point, analogous to -// [analysisinternal.AddImport]. It could also compute (as a side +// [analysis.AddImport]. It could also compute (as a side // effect) the set of additional imports required to ensure that there // is an accessible import for each necessary package, making it // converge even more closely with AddImport. diff --git a/vendor/golang.org/x/tools/internal/typesinternal/varkind.go b/vendor/golang.org/x/tools/internal/typesinternal/varkind.go index e5da0495111..26499cdd2e7 100644 --- a/vendor/golang.org/x/tools/internal/typesinternal/varkind.go +++ b/vendor/golang.org/x/tools/internal/typesinternal/varkind.go @@ -2,39 +2,22 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package typesinternal +//go:build go1.25 -// TODO(adonovan): when CL 645115 lands, define the go1.25 version of -// this API that actually does something. +package typesinternal import "go/types" -type VarKind uint8 +type VarKind = types.VarKind const ( - _ VarKind = iota // (not meaningful) - PackageVar // a package-level variable - LocalVar // a local variable - RecvVar // a method receiver variable - ParamVar // a function parameter variable - ResultVar // a function result variable - FieldVar // a struct field + PackageVar = types.PackageVar + LocalVar = types.LocalVar + RecvVar = types.RecvVar + ParamVar = types.ParamVar + ResultVar = types.ResultVar + FieldVar = types.FieldVar ) -func (kind VarKind) String() string { - return [...]string{ - 0: "VarKind(0)", - PackageVar: "PackageVar", - LocalVar: "LocalVar", - RecvVar: "RecvVar", - ParamVar: "ParamVar", - ResultVar: "ResultVar", - FieldVar: "FieldVar", - }[kind] -} - -// GetVarKind returns an invalid VarKind. -func GetVarKind(v *types.Var) VarKind { return 0 } - -// SetVarKind has no effect. 
-func SetVarKind(v *types.Var, kind VarKind) {} +func GetVarKind(v *types.Var) VarKind { return v.Kind() } +func SetVarKind(v *types.Var, kind VarKind) { v.SetKind(kind) } diff --git a/vendor/golang.org/x/tools/internal/typesinternal/varkind_go124.go b/vendor/golang.org/x/tools/internal/typesinternal/varkind_go124.go new file mode 100644 index 00000000000..17b1804b4e8 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typesinternal/varkind_go124.go @@ -0,0 +1,39 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.25 + +package typesinternal + +import "go/types" + +type VarKind uint8 + +const ( + _ VarKind = iota // (not meaningful) + PackageVar // a package-level variable + LocalVar // a local variable + RecvVar // a method receiver variable + ParamVar // a function parameter variable + ResultVar // a function result variable + FieldVar // a struct field +) + +func (kind VarKind) String() string { + return [...]string{ + 0: "VarKind(0)", + PackageVar: "PackageVar", + LocalVar: "LocalVar", + RecvVar: "RecvVar", + ParamVar: "ParamVar", + ResultVar: "ResultVar", + FieldVar: "FieldVar", + }[kind] +} + +// GetVarKind returns an invalid VarKind. +func GetVarKind(v *types.Var) VarKind { return 0 } + +// SetVarKind has no effect. 
+func SetVarKind(v *types.Var, kind VarKind) {} diff --git a/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go b/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go index 453bba2ad5e..d612a710297 100644 --- a/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go +++ b/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go @@ -258,12 +258,12 @@ func TypeExpr(t types.Type, qual types.Qualifier) ast.Expr { case *types.Signature: var params []*ast.Field - for i := 0; i < t.Params().Len(); i++ { + for v := range t.Params().Variables() { params = append(params, &ast.Field{ - Type: TypeExpr(t.Params().At(i).Type(), qual), + Type: TypeExpr(v.Type(), qual), Names: []*ast.Ident{ { - Name: t.Params().At(i).Name(), + Name: v.Name(), }, }, }) @@ -273,9 +273,9 @@ func TypeExpr(t types.Type, qual types.Qualifier) ast.Expr { last.Type = &ast.Ellipsis{Elt: last.Type.(*ast.ArrayType).Elt} } var returns []*ast.Field - for i := 0; i < t.Results().Len(); i++ { + for v := range t.Results().Variables() { returns = append(returns, &ast.Field{ - Type: TypeExpr(t.Results().At(i).Type(), qual), + Type: TypeExpr(v.Type(), qual), }) } return &ast.FuncType{ @@ -315,8 +315,8 @@ func TypeExpr(t types.Type, qual types.Qualifier) ast.Expr { if hasTypeArgs, ok := t.(interface{ TypeArgs() *types.TypeList }); ok { if typeArgs := hasTypeArgs.TypeArgs(); typeArgs != nil && typeArgs.Len() > 0 { var indices []ast.Expr - for i := range typeArgs.Len() { - indices = append(indices, TypeExpr(typeArgs.At(i), qual)) + for t0 := range typeArgs.Types() { + indices = append(indices, TypeExpr(t0, qual)) } expr = &ast.IndexListExpr{ X: expr, diff --git a/vendor/golang.org/x/tools/internal/versions/features.go b/vendor/golang.org/x/tools/internal/versions/features.go index b53f1786161..a5f4e3252cc 100644 --- a/vendor/golang.org/x/tools/internal/versions/features.go +++ b/vendor/golang.org/x/tools/internal/versions/features.go @@ -7,13 +7,17 @@ package versions // This file 
contains predicates for working with file versions to // decide when a tool should consider a language feature enabled. -// GoVersions that features in x/tools can be gated to. +// named constants, to avoid misspelling const ( Go1_18 = "go1.18" Go1_19 = "go1.19" Go1_20 = "go1.20" Go1_21 = "go1.21" Go1_22 = "go1.22" + Go1_23 = "go1.23" + Go1_24 = "go1.24" + Go1_25 = "go1.25" + Go1_26 = "go1.26" ) // Future is an invalid unknown Go version sometime in the future. diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go index b15c10e46b0..b4bc3a2bf36 100644 --- a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go +++ b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go @@ -16,55 +16,124 @@ * */ -// Package pickfirst contains the pick_first load balancing policy. +// Package pickfirst contains the pick_first load balancing policy which +// is the universal leaf policy. package pickfirst import ( "encoding/json" "errors" "fmt" - rand "math/rand/v2" + "net" + "net/netip" + "sync" + "time" "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/pickfirst/internal" "google.golang.org/grpc/connectivity" + expstats "google.golang.org/grpc/experimental/stats" "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal/envconfig" internalgrpclog "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" - - _ "google.golang.org/grpc/balancer/pickfirst/pickfirstleaf" // For automatically registering the new pickfirst if required. ) func init() { - if envconfig.NewPickFirstEnabled { - return - } balancer.Register(pickfirstBuilder{}) } -var logger = grpclog.Component("pick-first-lb") +// Name is the name of the pick_first balancer. 
+const Name = "pick_first" + +// enableHealthListenerKeyType is a unique key type used in resolver +// attributes to indicate whether the health listener usage is enabled. +type enableHealthListenerKeyType struct{} + +var ( + logger = grpclog.Component("pick-first-leaf-lb") + disconnectionsMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{ + Name: "grpc.lb.pick_first.disconnections", + Description: "EXPERIMENTAL. Number of times the selected subchannel becomes disconnected.", + Unit: "{disconnection}", + Labels: []string{"grpc.target"}, + Default: false, + }) + connectionAttemptsSucceededMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{ + Name: "grpc.lb.pick_first.connection_attempts_succeeded", + Description: "EXPERIMENTAL. Number of successful connection attempts.", + Unit: "{attempt}", + Labels: []string{"grpc.target"}, + Default: false, + }) + connectionAttemptsFailedMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{ + Name: "grpc.lb.pick_first.connection_attempts_failed", + Description: "EXPERIMENTAL. Number of failed connection attempts.", + Unit: "{attempt}", + Labels: []string{"grpc.target"}, + Default: false, + }) +) const ( - // Name is the name of the pick_first balancer. - Name = "pick_first" - logPrefix = "[pick-first-lb %p] " + // TODO: change to pick-first when this becomes the default pick_first policy. + logPrefix = "[pick-first-leaf-lb %p] " + // connectionDelayInterval is the time to wait for during the happy eyeballs + // pass before starting the next connection attempt. + connectionDelayInterval = 250 * time.Millisecond +) + +type ipAddrFamily int + +const ( + // ipAddrFamilyUnknown represents strings that can't be parsed as an IP + // address. 
+ ipAddrFamilyUnknown ipAddrFamily = iota + ipAddrFamilyV4 + ipAddrFamilyV6 ) type pickfirstBuilder struct{} -func (pickfirstBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer { - b := &pickfirstBalancer{cc: cc} +func (pickfirstBuilder) Build(cc balancer.ClientConn, bo balancer.BuildOptions) balancer.Balancer { + b := &pickfirstBalancer{ + cc: cc, + target: bo.Target.String(), + metricsRecorder: cc.MetricsRecorder(), + + subConns: resolver.NewAddressMapV2[*scData](), + state: connectivity.Connecting, + cancelConnectionTimer: func() {}, + } b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b)) return b } -func (pickfirstBuilder) Name() string { +func (b pickfirstBuilder) Name() string { return Name } +func (pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + var cfg pfConfig + if err := json.Unmarshal(js, &cfg); err != nil { + return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err) + } + return cfg, nil +} + +// EnableHealthListener updates the state to configure pickfirst for using a +// generic health listener. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a later +// release. +func EnableHealthListener(state resolver.State) resolver.State { + state.Attributes = state.Attributes.WithValue(enableHealthListenerKeyType{}, true) + return state +} + type pfConfig struct { serviceconfig.LoadBalancingConfig `json:"-"` @@ -74,90 +143,129 @@ type pfConfig struct { ShuffleAddressList bool `json:"shuffleAddressList"` } -func (pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { - var cfg pfConfig - if err := json.Unmarshal(js, &cfg); err != nil { - return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err) +// scData keeps track of the current state of the subConn. 
+// It is not safe for concurrent access. +type scData struct { + // The following fields are initialized at build time and read-only after + // that. + subConn balancer.SubConn + addr resolver.Address + + rawConnectivityState connectivity.State + // The effective connectivity state based on raw connectivity, health state + // and after following sticky TransientFailure behaviour defined in A62. + effectiveState connectivity.State + lastErr error + connectionFailedInFirstPass bool +} + +func (b *pickfirstBalancer) newSCData(addr resolver.Address) (*scData, error) { + sd := &scData{ + rawConnectivityState: connectivity.Idle, + effectiveState: connectivity.Idle, + addr: addr, } - return cfg, nil + sc, err := b.cc.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{ + StateListener: func(state balancer.SubConnState) { + b.updateSubConnState(sd, state) + }, + }) + if err != nil { + return nil, err + } + sd.subConn = sc + return sd, nil } type pickfirstBalancer struct { - logger *internalgrpclog.PrefixLogger - state connectivity.State - cc balancer.ClientConn - subConn balancer.SubConn + // The following fields are initialized at build time and read-only after + // that and therefore do not need to be guarded by a mutex. + logger *internalgrpclog.PrefixLogger + cc balancer.ClientConn + target string + metricsRecorder expstats.MetricsRecorder // guaranteed to be non nil + + // The mutex is used to ensure synchronization of updates triggered + // from the idle picker and the already serialized resolver, + // SubConn state updates. + mu sync.Mutex + // State reported to the channel based on SubConn states and resolver + // updates. + state connectivity.State + // scData for active subonns mapped by address. 
+ subConns *resolver.AddressMapV2[*scData] + addressList addressList + firstPass bool + numTF int + cancelConnectionTimer func() + healthCheckingEnabled bool } +// ResolverError is called by the ClientConn when the name resolver produces +// an error or when pickfirst determined the resolver update to be invalid. func (b *pickfirstBalancer) ResolverError(err error) { + b.mu.Lock() + defer b.mu.Unlock() + b.resolverErrorLocked(err) +} + +func (b *pickfirstBalancer) resolverErrorLocked(err error) { if b.logger.V(2) { b.logger.Infof("Received error from the name resolver: %v", err) } - if b.subConn == nil { - b.state = connectivity.TransientFailure - } - if b.state != connectivity.TransientFailure { - // The picker will not change since the balancer does not currently - // report an error. + // The picker will not change since the balancer does not currently + // report an error. If the balancer hasn't received a single good resolver + // update yet, transition to TRANSIENT_FAILURE. + if b.state != connectivity.TransientFailure && b.addressList.size() > 0 { + if b.logger.V(2) { + b.logger.Infof("Ignoring resolver error because balancer is using a previous good update.") + } return } - b.cc.UpdateState(balancer.State{ + + b.updateBalancerState(balancer.State{ ConnectivityState: connectivity.TransientFailure, Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)}, }) } -// Shuffler is an interface for shuffling an address list. -type Shuffler interface { - ShuffleAddressListForTesting(n int, swap func(i, j int)) -} - -// ShuffleAddressListForTesting pseudo-randomizes the order of addresses. n -// is the number of elements. swap swaps the elements with indexes i and j. 
-func ShuffleAddressListForTesting(n int, swap func(i, j int)) { rand.Shuffle(n, swap) } - func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error { + b.mu.Lock() + defer b.mu.Unlock() + b.cancelConnectionTimer() if len(state.ResolverState.Addresses) == 0 && len(state.ResolverState.Endpoints) == 0 { - // The resolver reported an empty address list. Treat it like an error by - // calling b.ResolverError. - if b.subConn != nil { - // Shut down the old subConn. All addresses were removed, so it is - // no longer valid. - b.subConn.Shutdown() - b.subConn = nil - } - b.ResolverError(errors.New("produced zero addresses")) + // Cleanup state pertaining to the previous resolver state. + // Treat an empty address list like an error by calling b.ResolverError. + b.closeSubConnsLocked() + b.addressList.updateAddrs(nil) + b.resolverErrorLocked(errors.New("produced zero addresses")) return balancer.ErrBadResolverState } - // We don't have to guard this block with the env var because ParseConfig - // already does so. + b.healthCheckingEnabled = state.ResolverState.Attributes.Value(enableHealthListenerKeyType{}) != nil cfg, ok := state.BalancerConfig.(pfConfig) if state.BalancerConfig != nil && !ok { - return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v", state.BalancerConfig, state.BalancerConfig) + return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v: %w", state.BalancerConfig, state.BalancerConfig, balancer.ErrBadResolverState) } if b.logger.V(2) { b.logger.Infof("Received new config %s, resolver state %s", pretty.ToJSON(cfg), pretty.ToJSON(state.ResolverState)) } - var addrs []resolver.Address + var newAddrs []resolver.Address if endpoints := state.ResolverState.Endpoints; len(endpoints) != 0 { - // Perform the optional shuffling described in gRFC A62. The shuffling will - // change the order of endpoints but not touch the order of the addresses - // within each endpoint. 
- A61 + // Perform the optional shuffling described in gRFC A62. The shuffling + // will change the order of endpoints but not touch the order of the + // addresses within each endpoint. - A61 if cfg.ShuffleAddressList { endpoints = append([]resolver.Endpoint{}, endpoints...) internal.RandShuffle(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] }) } - // "Flatten the list by concatenating the ordered list of addresses for each - // of the endpoints, in order." - A61 + // "Flatten the list by concatenating the ordered list of addresses for + // each of the endpoints, in order." - A61 for _, endpoint := range endpoints { - // "In the flattened list, interleave addresses from the two address - // families, as per RFC-8304 section 4." - A61 - // TODO: support the above language. - addrs = append(addrs, endpoint.Addresses...) + newAddrs = append(newAddrs, endpoint.Addresses...) } } else { // Endpoints not set, process addresses until we migrate resolver @@ -166,42 +274,53 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState // target do not forward the corresponding correct endpoints down/split // endpoints properly. Once all balancers correctly forward endpoints // down, can delete this else conditional. - addrs = state.ResolverState.Addresses + newAddrs = state.ResolverState.Addresses if cfg.ShuffleAddressList { - addrs = append([]resolver.Address{}, addrs...) - internal.RandShuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] }) + newAddrs = append([]resolver.Address{}, newAddrs...) + internal.RandShuffle(len(newAddrs), func(i, j int) { newAddrs[i], newAddrs[j] = newAddrs[j], newAddrs[i] }) } } - if b.subConn != nil { - b.cc.UpdateAddresses(b.subConn, addrs) + // If an address appears in multiple endpoints or in the same endpoint + // multiple times, we keep it only once. We will create only one SubConn + // for the address because an AddressMap is used to store SubConns. 
+ // Not de-duplicating would result in attempting to connect to the same + // SubConn multiple times in the same pass. We don't want this. + newAddrs = deDupAddresses(newAddrs) + newAddrs = interleaveAddresses(newAddrs) + + prevAddr := b.addressList.currentAddress() + prevSCData, found := b.subConns.Get(prevAddr) + prevAddrsCount := b.addressList.size() + isPrevRawConnectivityStateReady := found && prevSCData.rawConnectivityState == connectivity.Ready + b.addressList.updateAddrs(newAddrs) + + // If the previous ready SubConn exists in new address list, + // keep this connection and don't create new SubConns. + if isPrevRawConnectivityStateReady && b.addressList.seekTo(prevAddr) { return nil } - var subConn balancer.SubConn - subConn, err := b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{ - StateListener: func(state balancer.SubConnState) { - b.updateSubConnState(subConn, state) - }, - }) - if err != nil { - if b.logger.V(2) { - b.logger.Infof("Failed to create new SubConn: %v", err) - } - b.state = connectivity.TransientFailure - b.cc.UpdateState(balancer.State{ - ConnectivityState: connectivity.TransientFailure, - Picker: &picker{err: fmt.Errorf("error creating connection: %v", err)}, + b.reconcileSubConnsLocked(newAddrs) + // If it's the first resolver update or the balancer was already READY + // (but the new address list does not contain the ready SubConn) or + // CONNECTING, enter CONNECTING. + // We may be in TRANSIENT_FAILURE due to a previous empty address list, + // we should still enter CONNECTING because the sticky TF behaviour + // mentioned in A62 applies only when the TRANSIENT_FAILURE is reported + // due to connectivity failures. + if isPrevRawConnectivityStateReady || b.state == connectivity.Connecting || prevAddrsCount == 0 { + // Start connection attempt at first address. 
+ b.forceUpdateConcludedStateLocked(balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, }) - return balancer.ErrBadResolverState + b.startFirstPassLocked() + } else if b.state == connectivity.TransientFailure { + // If we're in TRANSIENT_FAILURE, we stay in TRANSIENT_FAILURE until + // we're READY. See A62. + b.startFirstPassLocked() } - b.subConn = subConn - b.state = connectivity.Idle - b.cc.UpdateState(balancer.State{ - ConnectivityState: connectivity.Connecting, - Picker: &picker{err: balancer.ErrNoSubConnAvailable}, - }) - b.subConn.Connect() return nil } @@ -211,63 +330,484 @@ func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state b b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", subConn, state) } -func (b *pickfirstBalancer) updateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) { - if b.logger.V(2) { - b.logger.Infof("Received SubConn state update: %p, %+v", subConn, state) - } - if b.subConn != subConn { - if b.logger.V(2) { - b.logger.Infof("Ignored state change because subConn is not recognized") - } - return - } - if state.ConnectivityState == connectivity.Shutdown { - b.subConn = nil - return - } +func (b *pickfirstBalancer) Close() { + b.mu.Lock() + defer b.mu.Unlock() + b.closeSubConnsLocked() + b.cancelConnectionTimer() + b.state = connectivity.Shutdown +} - switch state.ConnectivityState { - case connectivity.Ready: - b.cc.UpdateState(balancer.State{ - ConnectivityState: state.ConnectivityState, - Picker: &picker{result: balancer.PickResult{SubConn: subConn}}, - }) - case connectivity.Connecting: - if b.state == connectivity.TransientFailure { - // We stay in TransientFailure until we are Ready. See A62. - return - } - b.cc.UpdateState(balancer.State{ - ConnectivityState: state.ConnectivityState, +// ExitIdle moves the balancer out of idle state. 
It can be called concurrently +// by the idlePicker and clientConn so access to variables should be +// synchronized. +func (b *pickfirstBalancer) ExitIdle() { + b.mu.Lock() + defer b.mu.Unlock() + if b.state == connectivity.Idle { + // Move the balancer into CONNECTING state immediately. This is done to + // avoid staying in IDLE if a resolver update arrives before the first + // SubConn reports CONNECTING. + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.Connecting, Picker: &picker{err: balancer.ErrNoSubConnAvailable}, }) - case connectivity.Idle: - if b.state == connectivity.TransientFailure { - // We stay in TransientFailure until we are Ready. Also kick the - // subConn out of Idle into Connecting. See A62. - b.subConn.Connect() + b.startFirstPassLocked() + } +} + +func (b *pickfirstBalancer) startFirstPassLocked() { + b.firstPass = true + b.numTF = 0 + // Reset the connection attempt record for existing SubConns. + for _, sd := range b.subConns.Values() { + sd.connectionFailedInFirstPass = false + } + b.requestConnectionLocked() +} + +func (b *pickfirstBalancer) closeSubConnsLocked() { + for _, sd := range b.subConns.Values() { + sd.subConn.Shutdown() + } + b.subConns = resolver.NewAddressMapV2[*scData]() +} + +// deDupAddresses ensures that each address appears only once in the slice. +func deDupAddresses(addrs []resolver.Address) []resolver.Address { + seenAddrs := resolver.NewAddressMapV2[bool]() + retAddrs := []resolver.Address{} + + for _, addr := range addrs { + if _, ok := seenAddrs.Get(addr); ok { + continue + } + seenAddrs.Set(addr, true) + retAddrs = append(retAddrs, addr) + } + return retAddrs +} + +// interleaveAddresses interleaves addresses of both families (IPv4 and IPv6) +// as per RFC-8305 section 4. 
+// Whichever address family is first in the list is followed by an address of +// the other address family; that is, if the first address in the list is IPv6, +// then the first IPv4 address should be moved up in the list to be second in +// the list. It doesn't support configuring "First Address Family Count", i.e. +// there will always be a single member of the first address family at the +// beginning of the interleaved list. +// Addresses that are neither IPv4 nor IPv6 are treated as part of a third +// "unknown" family for interleaving. +// See: https://datatracker.ietf.org/doc/html/rfc8305#autoid-6 +func interleaveAddresses(addrs []resolver.Address) []resolver.Address { + familyAddrsMap := map[ipAddrFamily][]resolver.Address{} + interleavingOrder := []ipAddrFamily{} + for _, addr := range addrs { + family := addressFamily(addr.Addr) + if _, found := familyAddrsMap[family]; !found { + interleavingOrder = append(interleavingOrder, family) + } + familyAddrsMap[family] = append(familyAddrsMap[family], addr) + } + + interleavedAddrs := make([]resolver.Address, 0, len(addrs)) + + for curFamilyIdx := 0; len(interleavedAddrs) < len(addrs); curFamilyIdx = (curFamilyIdx + 1) % len(interleavingOrder) { + // Some IP types may have fewer addresses than others, so we look for + // the next type that has a remaining member to add to the interleaved + // list. + family := interleavingOrder[curFamilyIdx] + remainingMembers := familyAddrsMap[family] + if len(remainingMembers) > 0 { + interleavedAddrs = append(interleavedAddrs, remainingMembers[0]) + familyAddrsMap[family] = remainingMembers[1:] + } + } + + return interleavedAddrs +} + +// addressFamily returns the ipAddrFamily after parsing the address string. +// If the address isn't of the format "ip-address:port", it returns +// ipAddrFamilyUnknown. The address may be valid even if it's not an IP when +// using a resolver like passthrough where the address may be a hostname in +// some format that the dialer can resolve. 
+func addressFamily(address string) ipAddrFamily { + // Parse the IP after removing the port. + host, _, err := net.SplitHostPort(address) + if err != nil { + return ipAddrFamilyUnknown + } + ip, err := netip.ParseAddr(host) + if err != nil { + return ipAddrFamilyUnknown + } + switch { + case ip.Is4() || ip.Is4In6(): + return ipAddrFamilyV4 + case ip.Is6(): + return ipAddrFamilyV6 + default: + return ipAddrFamilyUnknown + } +} + +// reconcileSubConnsLocked updates the active subchannels based on a new address +// list from the resolver. It does this by: +// - closing subchannels: any existing subchannels associated with addresses +// that are no longer in the updated list are shut down. +// - removing subchannels: entries for these closed subchannels are removed +// from the subchannel map. +// +// This ensures that the subchannel map accurately reflects the current set of +// addresses received from the name resolver. +func (b *pickfirstBalancer) reconcileSubConnsLocked(newAddrs []resolver.Address) { + newAddrsMap := resolver.NewAddressMapV2[bool]() + for _, addr := range newAddrs { + newAddrsMap.Set(addr, true) + } + + for _, oldAddr := range b.subConns.Keys() { + if _, ok := newAddrsMap.Get(oldAddr); ok { + continue + } + val, _ := b.subConns.Get(oldAddr) + val.subConn.Shutdown() + b.subConns.Delete(oldAddr) + } +} + +// shutdownRemainingLocked shuts down remaining subConns. Called when a subConn +// becomes ready, which means that all other subConn must be shutdown. +func (b *pickfirstBalancer) shutdownRemainingLocked(selected *scData) { + b.cancelConnectionTimer() + for _, sd := range b.subConns.Values() { + if sd.subConn != selected.subConn { + sd.subConn.Shutdown() + } + } + b.subConns = resolver.NewAddressMapV2[*scData]() + b.subConns.Set(selected.addr, selected) +} + +// requestConnectionLocked starts connecting on the subchannel corresponding to +// the current address. If no subchannel exists, one is created. 
If the current +// subchannel is in TransientFailure, a connection to the next address is +// attempted until a subchannel is found. +func (b *pickfirstBalancer) requestConnectionLocked() { + if !b.addressList.isValid() { + return + } + var lastErr error + for valid := true; valid; valid = b.addressList.increment() { + curAddr := b.addressList.currentAddress() + sd, ok := b.subConns.Get(curAddr) + if !ok { + var err error + // We want to assign the new scData to sd from the outer scope, + // hence we can't use := below. + sd, err = b.newSCData(curAddr) + if err != nil { + // This should never happen, unless the clientConn is being shut + // down. + if b.logger.V(2) { + b.logger.Infof("Failed to create a subConn for address %v: %v", curAddr.String(), err) + } + // Do nothing, the LB policy will be closed soon. + return + } + b.subConns.Set(curAddr, sd) + } + + switch sd.rawConnectivityState { + case connectivity.Idle: + sd.subConn.Connect() + b.scheduleNextConnectionLocked() + return + case connectivity.TransientFailure: + // The SubConn is being re-used and failed during a previous pass + // over the addressList. It has not completed backoff yet. + // Mark it as having failed and try the next address. + sd.connectionFailedInFirstPass = true + lastErr = sd.lastErr + continue + case connectivity.Connecting: + // Wait for the connection attempt to complete or the timer to fire + // before attempting the next address. + b.scheduleNextConnectionLocked() + return + default: + b.logger.Errorf("SubConn with unexpected state %v present in SubConns map.", sd.rawConnectivityState) + return + + } + } + + // All the remaining addresses in the list are in TRANSIENT_FAILURE, end the + // first pass if possible. 
+ b.endFirstPassIfPossibleLocked(lastErr) +} + +func (b *pickfirstBalancer) scheduleNextConnectionLocked() { + b.cancelConnectionTimer() + if !b.addressList.hasNext() { + return + } + curAddr := b.addressList.currentAddress() + cancelled := false // Access to this is protected by the balancer's mutex. + closeFn := internal.TimeAfterFunc(connectionDelayInterval, func() { + b.mu.Lock() + defer b.mu.Unlock() + // If the scheduled task is cancelled while acquiring the mutex, return. + if cancelled { return } - b.cc.UpdateState(balancer.State{ - ConnectivityState: state.ConnectivityState, - Picker: &idlePicker{subConn: subConn}, + if b.logger.V(2) { + b.logger.Infof("Happy Eyeballs timer expired while waiting for connection to %q.", curAddr.Addr) + } + if b.addressList.increment() { + b.requestConnectionLocked() + } + }) + // Access to the cancellation callback held by the balancer is guarded by + // the balancer's mutex, so it's safe to set the boolean from the callback. + b.cancelConnectionTimer = sync.OnceFunc(func() { + cancelled = true + closeFn() + }) +} + +func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.SubConnState) { + b.mu.Lock() + defer b.mu.Unlock() + oldState := sd.rawConnectivityState + sd.rawConnectivityState = newState.ConnectivityState + // Previously relevant SubConns can still callback with state updates. + // To prevent pickers from returning these obsolete SubConns, this logic + // is included to check if the current list of active SubConns includes this + // SubConn. + if !b.isActiveSCData(sd) { + return + } + if newState.ConnectivityState == connectivity.Shutdown { + sd.effectiveState = connectivity.Shutdown + return + } + + // Record a connection attempt when exiting CONNECTING. 
+ if newState.ConnectivityState == connectivity.TransientFailure { + sd.connectionFailedInFirstPass = true + connectionAttemptsFailedMetric.Record(b.metricsRecorder, 1, b.target) + } + + if newState.ConnectivityState == connectivity.Ready { + connectionAttemptsSucceededMetric.Record(b.metricsRecorder, 1, b.target) + b.shutdownRemainingLocked(sd) + if !b.addressList.seekTo(sd.addr) { + // This should not fail as we should have only one SubConn after + // entering READY. The SubConn should be present in the addressList. + b.logger.Errorf("Address %q not found address list in %v", sd.addr, b.addressList.addresses) + return + } + if !b.healthCheckingEnabled { + if b.logger.V(2) { + b.logger.Infof("SubConn %p reported connectivity state READY and the health listener is disabled. Transitioning SubConn to READY.", sd.subConn) + } + + sd.effectiveState = connectivity.Ready + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.Ready, + Picker: &picker{result: balancer.PickResult{SubConn: sd.subConn}}, + }) + return + } + if b.logger.V(2) { + b.logger.Infof("SubConn %p reported connectivity state READY. Registering health listener.", sd.subConn) + } + // Send a CONNECTING update to take the SubConn out of sticky-TF if + // required. + sd.effectiveState = connectivity.Connecting + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, + }) + sd.subConn.RegisterHealthListener(func(scs balancer.SubConnState) { + b.updateSubConnHealthState(sd, scs) + }) + return + } + + // If the LB policy is READY, and it receives a subchannel state change, + // it means that the READY subchannel has failed. + // A SubConn can also transition from CONNECTING directly to IDLE when + // a transport is successfully created, but the connection fails + // before the SubConn can send the notification for READY. We treat + // this as a successful connection and transition to IDLE. 
+ // TODO: https://github.com/grpc/grpc-go/issues/7862 - Remove the second + // part of the if condition below once the issue is fixed. + if oldState == connectivity.Ready || (oldState == connectivity.Connecting && newState.ConnectivityState == connectivity.Idle) { + // Once a transport fails, the balancer enters IDLE and starts from + // the first address when the picker is used. + b.shutdownRemainingLocked(sd) + sd.effectiveState = newState.ConnectivityState + // READY SubConn interspliced in between CONNECTING and IDLE, need to + // account for that. + if oldState == connectivity.Connecting { + // A known issue (https://github.com/grpc/grpc-go/issues/7862) + // causes a race that prevents the READY state change notification. + // This works around it. + connectionAttemptsSucceededMetric.Record(b.metricsRecorder, 1, b.target) + } + disconnectionsMetric.Record(b.metricsRecorder, 1, b.target) + b.addressList.reset() + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.Idle, + Picker: &idlePicker{exitIdle: sync.OnceFunc(b.ExitIdle)}, + }) + return + } + + if b.firstPass { + switch newState.ConnectivityState { + case connectivity.Connecting: + // The effective state can be in either IDLE, CONNECTING or + // TRANSIENT_FAILURE. If it's TRANSIENT_FAILURE, stay in + // TRANSIENT_FAILURE until it's READY. See A62. + if sd.effectiveState != connectivity.TransientFailure { + sd.effectiveState = connectivity.Connecting + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, + }) + } + case connectivity.TransientFailure: + sd.lastErr = newState.ConnectionError + sd.effectiveState = connectivity.TransientFailure + // Since we're re-using common SubConns while handling resolver + // updates, we could receive an out of turn TRANSIENT_FAILURE from + // a pass over the previous address list. Happy Eyeballs will also + // cause out of order updates to arrive. 
+ + if curAddr := b.addressList.currentAddress(); equalAddressIgnoringBalAttributes(&curAddr, &sd.addr) { + b.cancelConnectionTimer() + if b.addressList.increment() { + b.requestConnectionLocked() + return + } + } + + // End the first pass if we've seen a TRANSIENT_FAILURE from all + // SubConns once. + b.endFirstPassIfPossibleLocked(newState.ConnectionError) + } + return + } + + // We have finished the first pass, keep re-connecting failing SubConns. + switch newState.ConnectivityState { + case connectivity.TransientFailure: + b.numTF = (b.numTF + 1) % b.subConns.Len() + sd.lastErr = newState.ConnectionError + if b.numTF%b.subConns.Len() == 0 { + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: newState.ConnectionError}, + }) + } + // We don't need to request re-resolution since the SubConn already + // does that before reporting TRANSIENT_FAILURE. + // TODO: #7534 - Move re-resolution requests from SubConn into + // pick_first. + case connectivity.Idle: + sd.subConn.Connect() + } +} + +// endFirstPassIfPossibleLocked ends the first happy-eyeballs pass if all the +// addresses are tried and their SubConns have reported a failure. +func (b *pickfirstBalancer) endFirstPassIfPossibleLocked(lastErr error) { + // An optimization to avoid iterating over the entire SubConn map. + if b.addressList.isValid() { + return + } + // Connect() has been called on all the SubConns. The first pass can be + // ended if all the SubConns have reported a failure. + for _, sd := range b.subConns.Values() { + if !sd.connectionFailedInFirstPass { + return + } + } + b.firstPass = false + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: lastErr}, + }) + // Start re-connecting all the SubConns that are already in IDLE. 
+ for _, sd := range b.subConns.Values() { + if sd.rawConnectivityState == connectivity.Idle { + sd.subConn.Connect() + } + } +} + +func (b *pickfirstBalancer) isActiveSCData(sd *scData) bool { + activeSD, found := b.subConns.Get(sd.addr) + return found && activeSD == sd +} + +func (b *pickfirstBalancer) updateSubConnHealthState(sd *scData, state balancer.SubConnState) { + b.mu.Lock() + defer b.mu.Unlock() + // Previously relevant SubConns can still callback with state updates. + // To prevent pickers from returning these obsolete SubConns, this logic + // is included to check if the current list of active SubConns includes + // this SubConn. + if !b.isActiveSCData(sd) { + return + } + sd.effectiveState = state.ConnectivityState + switch state.ConnectivityState { + case connectivity.Ready: + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.Ready, + Picker: &picker{result: balancer.PickResult{SubConn: sd.subConn}}, }) case connectivity.TransientFailure: - b.cc.UpdateState(balancer.State{ - ConnectivityState: state.ConnectivityState, - Picker: &picker{err: state.ConnectionError}, + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: fmt.Errorf("pickfirst: health check failure: %v", state.ConnectionError)}, }) + case connectivity.Connecting: + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, + }) + default: + b.logger.Errorf("Got unexpected health update for SubConn %p: %v", state) } - b.state = state.ConnectivityState } -func (b *pickfirstBalancer) Close() { +// updateBalancerState stores the state reported to the channel and calls +// ClientConn.UpdateState(). As an optimization, it avoids sending duplicate +// updates to the channel. 
+func (b *pickfirstBalancer) updateBalancerState(newState balancer.State) { + // In case of TransientFailures allow the picker to be updated to update + // the connectivity error, in all other cases don't send duplicate state + // updates. + if newState.ConnectivityState == b.state && b.state != connectivity.TransientFailure { + return + } + b.forceUpdateConcludedStateLocked(newState) } -func (b *pickfirstBalancer) ExitIdle() { - if b.subConn != nil && b.state == connectivity.Idle { - b.subConn.Connect() - } +// forceUpdateConcludedStateLocked stores the state reported to the channel and +// calls ClientConn.UpdateState(). +// A separate function is defined to force update the ClientConn state since the +// channel doesn't correctly assume that LB policies start in CONNECTING and +// relies on LB policy to send an initial CONNECTING update. +func (b *pickfirstBalancer) forceUpdateConcludedStateLocked(newState balancer.State) { + b.state = newState.ConnectivityState + b.cc.UpdateState(newState) } type picker struct { @@ -282,10 +822,87 @@ func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) { // idlePicker is used when the SubConn is IDLE and kicks the SubConn into // CONNECTING when Pick is called. type idlePicker struct { - subConn balancer.SubConn + exitIdle func() } func (i *idlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { - i.subConn.Connect() + i.exitIdle() return balancer.PickResult{}, balancer.ErrNoSubConnAvailable } + +// addressList manages sequentially iterating over addresses present in a list +// of endpoints. It provides a 1 dimensional view of the addresses present in +// the endpoints. +// This type is not safe for concurrent access. +type addressList struct { + addresses []resolver.Address + idx int +} + +func (al *addressList) isValid() bool { + return al.idx < len(al.addresses) +} + +func (al *addressList) size() int { + return len(al.addresses) +} + +// increment moves to the next index in the address list. 
+// This method returns false if it went off the list, true otherwise. +func (al *addressList) increment() bool { + if !al.isValid() { + return false + } + al.idx++ + return al.idx < len(al.addresses) +} + +// currentAddress returns the current address pointed to in the addressList. +// If the list is in an invalid state, it returns an empty address instead. +func (al *addressList) currentAddress() resolver.Address { + if !al.isValid() { + return resolver.Address{} + } + return al.addresses[al.idx] +} + +func (al *addressList) reset() { + al.idx = 0 +} + +func (al *addressList) updateAddrs(addrs []resolver.Address) { + al.addresses = addrs + al.reset() +} + +// seekTo returns false if the needle was not found and the current index was +// left unchanged. +func (al *addressList) seekTo(needle resolver.Address) bool { + for ai, addr := range al.addresses { + if !equalAddressIgnoringBalAttributes(&addr, &needle) { + continue + } + al.idx = ai + return true + } + return false +} + +// hasNext returns whether incrementing the addressList will result in moving +// past the end of the list. If the list has already moved past the end, it +// returns false. +func (al *addressList) hasNext() bool { + if !al.isValid() { + return false + } + return al.idx+1 < len(al.addresses) +} + +// equalAddressIgnoringBalAttributes returns true is a and b are considered +// equal. This is different from the Equal method on the resolver.Address type +// which considers all fields to determine equality. Here, we only consider +// fields that are meaningful to the SubConn. 
+func equalAddressIgnoringBalAttributes(a, b *resolver.Address) bool { + return a.Addr == b.Addr && a.ServerName == b.ServerName && + a.Attributes.Equal(b.Attributes) +} diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go deleted file mode 100644 index 9ffdd28a01e..00000000000 --- a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go +++ /dev/null @@ -1,913 +0,0 @@ -/* - * - * Copyright 2024 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package pickfirstleaf contains the pick_first load balancing policy which -// will be the universal leaf policy after dualstack changes are implemented. -// -// # Experimental -// -// Notice: This package is EXPERIMENTAL and may be changed or removed in a -// later release. 
-package pickfirstleaf - -import ( - "encoding/json" - "errors" - "fmt" - "net" - "net/netip" - "sync" - "time" - - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/balancer/pickfirst/internal" - "google.golang.org/grpc/connectivity" - expstats "google.golang.org/grpc/experimental/stats" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal/envconfig" - internalgrpclog "google.golang.org/grpc/internal/grpclog" - "google.golang.org/grpc/internal/pretty" - "google.golang.org/grpc/resolver" - "google.golang.org/grpc/serviceconfig" -) - -func init() { - if envconfig.NewPickFirstEnabled { - // Register as the default pick_first balancer. - Name = "pick_first" - } - balancer.Register(pickfirstBuilder{}) -} - -// enableHealthListenerKeyType is a unique key type used in resolver -// attributes to indicate whether the health listener usage is enabled. -type enableHealthListenerKeyType struct{} - -var ( - logger = grpclog.Component("pick-first-leaf-lb") - // Name is the name of the pick_first_leaf balancer. - // It is changed to "pick_first" in init() if this balancer is to be - // registered as the default pickfirst. - Name = "pick_first_leaf" - disconnectionsMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{ - Name: "grpc.lb.pick_first.disconnections", - Description: "EXPERIMENTAL. Number of times the selected subchannel becomes disconnected.", - Unit: "{disconnection}", - Labels: []string{"grpc.target"}, - Default: false, - }) - connectionAttemptsSucceededMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{ - Name: "grpc.lb.pick_first.connection_attempts_succeeded", - Description: "EXPERIMENTAL. Number of successful connection attempts.", - Unit: "{attempt}", - Labels: []string{"grpc.target"}, - Default: false, - }) - connectionAttemptsFailedMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{ - Name: "grpc.lb.pick_first.connection_attempts_failed", - Description: "EXPERIMENTAL. 
Number of failed connection attempts.", - Unit: "{attempt}", - Labels: []string{"grpc.target"}, - Default: false, - }) -) - -const ( - // TODO: change to pick-first when this becomes the default pick_first policy. - logPrefix = "[pick-first-leaf-lb %p] " - // connectionDelayInterval is the time to wait for during the happy eyeballs - // pass before starting the next connection attempt. - connectionDelayInterval = 250 * time.Millisecond -) - -type ipAddrFamily int - -const ( - // ipAddrFamilyUnknown represents strings that can't be parsed as an IP - // address. - ipAddrFamilyUnknown ipAddrFamily = iota - ipAddrFamilyV4 - ipAddrFamilyV6 -) - -type pickfirstBuilder struct{} - -func (pickfirstBuilder) Build(cc balancer.ClientConn, bo balancer.BuildOptions) balancer.Balancer { - b := &pickfirstBalancer{ - cc: cc, - target: bo.Target.String(), - metricsRecorder: cc.MetricsRecorder(), - - subConns: resolver.NewAddressMapV2[*scData](), - state: connectivity.Connecting, - cancelConnectionTimer: func() {}, - } - b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b)) - return b -} - -func (b pickfirstBuilder) Name() string { - return Name -} - -func (pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { - var cfg pfConfig - if err := json.Unmarshal(js, &cfg); err != nil { - return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err) - } - return cfg, nil -} - -// EnableHealthListener updates the state to configure pickfirst for using a -// generic health listener. 
-func EnableHealthListener(state resolver.State) resolver.State { - state.Attributes = state.Attributes.WithValue(enableHealthListenerKeyType{}, true) - return state -} - -type pfConfig struct { - serviceconfig.LoadBalancingConfig `json:"-"` - - // If set to true, instructs the LB policy to shuffle the order of the list - // of endpoints received from the name resolver before attempting to - // connect to them. - ShuffleAddressList bool `json:"shuffleAddressList"` -} - -// scData keeps track of the current state of the subConn. -// It is not safe for concurrent access. -type scData struct { - // The following fields are initialized at build time and read-only after - // that. - subConn balancer.SubConn - addr resolver.Address - - rawConnectivityState connectivity.State - // The effective connectivity state based on raw connectivity, health state - // and after following sticky TransientFailure behaviour defined in A62. - effectiveState connectivity.State - lastErr error - connectionFailedInFirstPass bool -} - -func (b *pickfirstBalancer) newSCData(addr resolver.Address) (*scData, error) { - sd := &scData{ - rawConnectivityState: connectivity.Idle, - effectiveState: connectivity.Idle, - addr: addr, - } - sc, err := b.cc.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{ - StateListener: func(state balancer.SubConnState) { - b.updateSubConnState(sd, state) - }, - }) - if err != nil { - return nil, err - } - sd.subConn = sc - return sd, nil -} - -type pickfirstBalancer struct { - // The following fields are initialized at build time and read-only after - // that and therefore do not need to be guarded by a mutex. - logger *internalgrpclog.PrefixLogger - cc balancer.ClientConn - target string - metricsRecorder expstats.MetricsRecorder // guaranteed to be non nil - - // The mutex is used to ensure synchronization of updates triggered - // from the idle picker and the already serialized resolver, - // SubConn state updates. 
- mu sync.Mutex - // State reported to the channel based on SubConn states and resolver - // updates. - state connectivity.State - // scData for active subonns mapped by address. - subConns *resolver.AddressMapV2[*scData] - addressList addressList - firstPass bool - numTF int - cancelConnectionTimer func() - healthCheckingEnabled bool -} - -// ResolverError is called by the ClientConn when the name resolver produces -// an error or when pickfirst determined the resolver update to be invalid. -func (b *pickfirstBalancer) ResolverError(err error) { - b.mu.Lock() - defer b.mu.Unlock() - b.resolverErrorLocked(err) -} - -func (b *pickfirstBalancer) resolverErrorLocked(err error) { - if b.logger.V(2) { - b.logger.Infof("Received error from the name resolver: %v", err) - } - - // The picker will not change since the balancer does not currently - // report an error. If the balancer hasn't received a single good resolver - // update yet, transition to TRANSIENT_FAILURE. - if b.state != connectivity.TransientFailure && b.addressList.size() > 0 { - if b.logger.V(2) { - b.logger.Infof("Ignoring resolver error because balancer is using a previous good update.") - } - return - } - - b.updateBalancerState(balancer.State{ - ConnectivityState: connectivity.TransientFailure, - Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)}, - }) -} - -func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error { - b.mu.Lock() - defer b.mu.Unlock() - b.cancelConnectionTimer() - if len(state.ResolverState.Addresses) == 0 && len(state.ResolverState.Endpoints) == 0 { - // Cleanup state pertaining to the previous resolver state. - // Treat an empty address list like an error by calling b.ResolverError. 
- b.closeSubConnsLocked() - b.addressList.updateAddrs(nil) - b.resolverErrorLocked(errors.New("produced zero addresses")) - return balancer.ErrBadResolverState - } - b.healthCheckingEnabled = state.ResolverState.Attributes.Value(enableHealthListenerKeyType{}) != nil - cfg, ok := state.BalancerConfig.(pfConfig) - if state.BalancerConfig != nil && !ok { - return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v: %w", state.BalancerConfig, state.BalancerConfig, balancer.ErrBadResolverState) - } - - if b.logger.V(2) { - b.logger.Infof("Received new config %s, resolver state %s", pretty.ToJSON(cfg), pretty.ToJSON(state.ResolverState)) - } - - var newAddrs []resolver.Address - if endpoints := state.ResolverState.Endpoints; len(endpoints) != 0 { - // Perform the optional shuffling described in gRFC A62. The shuffling - // will change the order of endpoints but not touch the order of the - // addresses within each endpoint. - A61 - if cfg.ShuffleAddressList { - endpoints = append([]resolver.Endpoint{}, endpoints...) - internal.RandShuffle(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] }) - } - - // "Flatten the list by concatenating the ordered list of addresses for - // each of the endpoints, in order." - A61 - for _, endpoint := range endpoints { - newAddrs = append(newAddrs, endpoint.Addresses...) - } - } else { - // Endpoints not set, process addresses until we migrate resolver - // emissions fully to Endpoints. The top channel does wrap emitted - // addresses with endpoints, however some balancers such as weighted - // target do not forward the corresponding correct endpoints down/split - // endpoints properly. Once all balancers correctly forward endpoints - // down, can delete this else conditional. - newAddrs = state.ResolverState.Addresses - if cfg.ShuffleAddressList { - newAddrs = append([]resolver.Address{}, newAddrs...) 
- internal.RandShuffle(len(newAddrs), func(i, j int) { newAddrs[i], newAddrs[j] = newAddrs[j], newAddrs[i] }) - } - } - - // If an address appears in multiple endpoints or in the same endpoint - // multiple times, we keep it only once. We will create only one SubConn - // for the address because an AddressMap is used to store SubConns. - // Not de-duplicating would result in attempting to connect to the same - // SubConn multiple times in the same pass. We don't want this. - newAddrs = deDupAddresses(newAddrs) - newAddrs = interleaveAddresses(newAddrs) - - prevAddr := b.addressList.currentAddress() - prevSCData, found := b.subConns.Get(prevAddr) - prevAddrsCount := b.addressList.size() - isPrevRawConnectivityStateReady := found && prevSCData.rawConnectivityState == connectivity.Ready - b.addressList.updateAddrs(newAddrs) - - // If the previous ready SubConn exists in new address list, - // keep this connection and don't create new SubConns. - if isPrevRawConnectivityStateReady && b.addressList.seekTo(prevAddr) { - return nil - } - - b.reconcileSubConnsLocked(newAddrs) - // If it's the first resolver update or the balancer was already READY - // (but the new address list does not contain the ready SubConn) or - // CONNECTING, enter CONNECTING. - // We may be in TRANSIENT_FAILURE due to a previous empty address list, - // we should still enter CONNECTING because the sticky TF behaviour - // mentioned in A62 applies only when the TRANSIENT_FAILURE is reported - // due to connectivity failures. - if isPrevRawConnectivityStateReady || b.state == connectivity.Connecting || prevAddrsCount == 0 { - // Start connection attempt at first address. - b.forceUpdateConcludedStateLocked(balancer.State{ - ConnectivityState: connectivity.Connecting, - Picker: &picker{err: balancer.ErrNoSubConnAvailable}, - }) - b.startFirstPassLocked() - } else if b.state == connectivity.TransientFailure { - // If we're in TRANSIENT_FAILURE, we stay in TRANSIENT_FAILURE until - // we're READY. 
See A62. - b.startFirstPassLocked() - } - return nil -} - -// UpdateSubConnState is unused as a StateListener is always registered when -// creating SubConns. -func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) { - b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", subConn, state) -} - -func (b *pickfirstBalancer) Close() { - b.mu.Lock() - defer b.mu.Unlock() - b.closeSubConnsLocked() - b.cancelConnectionTimer() - b.state = connectivity.Shutdown -} - -// ExitIdle moves the balancer out of idle state. It can be called concurrently -// by the idlePicker and clientConn so access to variables should be -// synchronized. -func (b *pickfirstBalancer) ExitIdle() { - b.mu.Lock() - defer b.mu.Unlock() - if b.state == connectivity.Idle { - // Move the balancer into CONNECTING state immediately. This is done to - // avoid staying in IDLE if a resolver update arrives before the first - // SubConn reports CONNECTING. - b.updateBalancerState(balancer.State{ - ConnectivityState: connectivity.Connecting, - Picker: &picker{err: balancer.ErrNoSubConnAvailable}, - }) - b.startFirstPassLocked() - } -} - -func (b *pickfirstBalancer) startFirstPassLocked() { - b.firstPass = true - b.numTF = 0 - // Reset the connection attempt record for existing SubConns. - for _, sd := range b.subConns.Values() { - sd.connectionFailedInFirstPass = false - } - b.requestConnectionLocked() -} - -func (b *pickfirstBalancer) closeSubConnsLocked() { - for _, sd := range b.subConns.Values() { - sd.subConn.Shutdown() - } - b.subConns = resolver.NewAddressMapV2[*scData]() -} - -// deDupAddresses ensures that each address appears only once in the slice. 
-func deDupAddresses(addrs []resolver.Address) []resolver.Address { - seenAddrs := resolver.NewAddressMapV2[*scData]() - retAddrs := []resolver.Address{} - - for _, addr := range addrs { - if _, ok := seenAddrs.Get(addr); ok { - continue - } - retAddrs = append(retAddrs, addr) - } - return retAddrs -} - -// interleaveAddresses interleaves addresses of both families (IPv4 and IPv6) -// as per RFC-8305 section 4. -// Whichever address family is first in the list is followed by an address of -// the other address family; that is, if the first address in the list is IPv6, -// then the first IPv4 address should be moved up in the list to be second in -// the list. It doesn't support configuring "First Address Family Count", i.e. -// there will always be a single member of the first address family at the -// beginning of the interleaved list. -// Addresses that are neither IPv4 nor IPv6 are treated as part of a third -// "unknown" family for interleaving. -// See: https://datatracker.ietf.org/doc/html/rfc8305#autoid-6 -func interleaveAddresses(addrs []resolver.Address) []resolver.Address { - familyAddrsMap := map[ipAddrFamily][]resolver.Address{} - interleavingOrder := []ipAddrFamily{} - for _, addr := range addrs { - family := addressFamily(addr.Addr) - if _, found := familyAddrsMap[family]; !found { - interleavingOrder = append(interleavingOrder, family) - } - familyAddrsMap[family] = append(familyAddrsMap[family], addr) - } - - interleavedAddrs := make([]resolver.Address, 0, len(addrs)) - - for curFamilyIdx := 0; len(interleavedAddrs) < len(addrs); curFamilyIdx = (curFamilyIdx + 1) % len(interleavingOrder) { - // Some IP types may have fewer addresses than others, so we look for - // the next type that has a remaining member to add to the interleaved - // list. 
- family := interleavingOrder[curFamilyIdx] - remainingMembers := familyAddrsMap[family] - if len(remainingMembers) > 0 { - interleavedAddrs = append(interleavedAddrs, remainingMembers[0]) - familyAddrsMap[family] = remainingMembers[1:] - } - } - - return interleavedAddrs -} - -// addressFamily returns the ipAddrFamily after parsing the address string. -// If the address isn't of the format "ip-address:port", it returns -// ipAddrFamilyUnknown. The address may be valid even if it's not an IP when -// using a resolver like passthrough where the address may be a hostname in -// some format that the dialer can resolve. -func addressFamily(address string) ipAddrFamily { - // Parse the IP after removing the port. - host, _, err := net.SplitHostPort(address) - if err != nil { - return ipAddrFamilyUnknown - } - ip, err := netip.ParseAddr(host) - if err != nil { - return ipAddrFamilyUnknown - } - switch { - case ip.Is4() || ip.Is4In6(): - return ipAddrFamilyV4 - case ip.Is6(): - return ipAddrFamilyV6 - default: - return ipAddrFamilyUnknown - } -} - -// reconcileSubConnsLocked updates the active subchannels based on a new address -// list from the resolver. It does this by: -// - closing subchannels: any existing subchannels associated with addresses -// that are no longer in the updated list are shut down. -// - removing subchannels: entries for these closed subchannels are removed -// from the subchannel map. -// -// This ensures that the subchannel map accurately reflects the current set of -// addresses received from the name resolver. 
-func (b *pickfirstBalancer) reconcileSubConnsLocked(newAddrs []resolver.Address) { - newAddrsMap := resolver.NewAddressMapV2[bool]() - for _, addr := range newAddrs { - newAddrsMap.Set(addr, true) - } - - for _, oldAddr := range b.subConns.Keys() { - if _, ok := newAddrsMap.Get(oldAddr); ok { - continue - } - val, _ := b.subConns.Get(oldAddr) - val.subConn.Shutdown() - b.subConns.Delete(oldAddr) - } -} - -// shutdownRemainingLocked shuts down remaining subConns. Called when a subConn -// becomes ready, which means that all other subConn must be shutdown. -func (b *pickfirstBalancer) shutdownRemainingLocked(selected *scData) { - b.cancelConnectionTimer() - for _, sd := range b.subConns.Values() { - if sd.subConn != selected.subConn { - sd.subConn.Shutdown() - } - } - b.subConns = resolver.NewAddressMapV2[*scData]() - b.subConns.Set(selected.addr, selected) -} - -// requestConnectionLocked starts connecting on the subchannel corresponding to -// the current address. If no subchannel exists, one is created. If the current -// subchannel is in TransientFailure, a connection to the next address is -// attempted until a subchannel is found. -func (b *pickfirstBalancer) requestConnectionLocked() { - if !b.addressList.isValid() { - return - } - var lastErr error - for valid := true; valid; valid = b.addressList.increment() { - curAddr := b.addressList.currentAddress() - sd, ok := b.subConns.Get(curAddr) - if !ok { - var err error - // We want to assign the new scData to sd from the outer scope, - // hence we can't use := below. - sd, err = b.newSCData(curAddr) - if err != nil { - // This should never happen, unless the clientConn is being shut - // down. - if b.logger.V(2) { - b.logger.Infof("Failed to create a subConn for address %v: %v", curAddr.String(), err) - } - // Do nothing, the LB policy will be closed soon. 
- return - } - b.subConns.Set(curAddr, sd) - } - - switch sd.rawConnectivityState { - case connectivity.Idle: - sd.subConn.Connect() - b.scheduleNextConnectionLocked() - return - case connectivity.TransientFailure: - // The SubConn is being re-used and failed during a previous pass - // over the addressList. It has not completed backoff yet. - // Mark it as having failed and try the next address. - sd.connectionFailedInFirstPass = true - lastErr = sd.lastErr - continue - case connectivity.Connecting: - // Wait for the connection attempt to complete or the timer to fire - // before attempting the next address. - b.scheduleNextConnectionLocked() - return - default: - b.logger.Errorf("SubConn with unexpected state %v present in SubConns map.", sd.rawConnectivityState) - return - - } - } - - // All the remaining addresses in the list are in TRANSIENT_FAILURE, end the - // first pass if possible. - b.endFirstPassIfPossibleLocked(lastErr) -} - -func (b *pickfirstBalancer) scheduleNextConnectionLocked() { - b.cancelConnectionTimer() - if !b.addressList.hasNext() { - return - } - curAddr := b.addressList.currentAddress() - cancelled := false // Access to this is protected by the balancer's mutex. - closeFn := internal.TimeAfterFunc(connectionDelayInterval, func() { - b.mu.Lock() - defer b.mu.Unlock() - // If the scheduled task is cancelled while acquiring the mutex, return. - if cancelled { - return - } - if b.logger.V(2) { - b.logger.Infof("Happy Eyeballs timer expired while waiting for connection to %q.", curAddr.Addr) - } - if b.addressList.increment() { - b.requestConnectionLocked() - } - }) - // Access to the cancellation callback held by the balancer is guarded by - // the balancer's mutex, so it's safe to set the boolean from the callback. 
- b.cancelConnectionTimer = sync.OnceFunc(func() { - cancelled = true - closeFn() - }) -} - -func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.SubConnState) { - b.mu.Lock() - defer b.mu.Unlock() - oldState := sd.rawConnectivityState - sd.rawConnectivityState = newState.ConnectivityState - // Previously relevant SubConns can still callback with state updates. - // To prevent pickers from returning these obsolete SubConns, this logic - // is included to check if the current list of active SubConns includes this - // SubConn. - if !b.isActiveSCData(sd) { - return - } - if newState.ConnectivityState == connectivity.Shutdown { - sd.effectiveState = connectivity.Shutdown - return - } - - // Record a connection attempt when exiting CONNECTING. - if newState.ConnectivityState == connectivity.TransientFailure { - sd.connectionFailedInFirstPass = true - connectionAttemptsFailedMetric.Record(b.metricsRecorder, 1, b.target) - } - - if newState.ConnectivityState == connectivity.Ready { - connectionAttemptsSucceededMetric.Record(b.metricsRecorder, 1, b.target) - b.shutdownRemainingLocked(sd) - if !b.addressList.seekTo(sd.addr) { - // This should not fail as we should have only one SubConn after - // entering READY. The SubConn should be present in the addressList. - b.logger.Errorf("Address %q not found address list in %v", sd.addr, b.addressList.addresses) - return - } - if !b.healthCheckingEnabled { - if b.logger.V(2) { - b.logger.Infof("SubConn %p reported connectivity state READY and the health listener is disabled. Transitioning SubConn to READY.", sd.subConn) - } - - sd.effectiveState = connectivity.Ready - b.updateBalancerState(balancer.State{ - ConnectivityState: connectivity.Ready, - Picker: &picker{result: balancer.PickResult{SubConn: sd.subConn}}, - }) - return - } - if b.logger.V(2) { - b.logger.Infof("SubConn %p reported connectivity state READY. 
Registering health listener.", sd.subConn) - } - // Send a CONNECTING update to take the SubConn out of sticky-TF if - // required. - sd.effectiveState = connectivity.Connecting - b.updateBalancerState(balancer.State{ - ConnectivityState: connectivity.Connecting, - Picker: &picker{err: balancer.ErrNoSubConnAvailable}, - }) - sd.subConn.RegisterHealthListener(func(scs balancer.SubConnState) { - b.updateSubConnHealthState(sd, scs) - }) - return - } - - // If the LB policy is READY, and it receives a subchannel state change, - // it means that the READY subchannel has failed. - // A SubConn can also transition from CONNECTING directly to IDLE when - // a transport is successfully created, but the connection fails - // before the SubConn can send the notification for READY. We treat - // this as a successful connection and transition to IDLE. - // TODO: https://github.com/grpc/grpc-go/issues/7862 - Remove the second - // part of the if condition below once the issue is fixed. - if oldState == connectivity.Ready || (oldState == connectivity.Connecting && newState.ConnectivityState == connectivity.Idle) { - // Once a transport fails, the balancer enters IDLE and starts from - // the first address when the picker is used. - b.shutdownRemainingLocked(sd) - sd.effectiveState = newState.ConnectivityState - // READY SubConn interspliced in between CONNECTING and IDLE, need to - // account for that. - if oldState == connectivity.Connecting { - // A known issue (https://github.com/grpc/grpc-go/issues/7862) - // causes a race that prevents the READY state change notification. - // This works around it. 
- connectionAttemptsSucceededMetric.Record(b.metricsRecorder, 1, b.target) - } - disconnectionsMetric.Record(b.metricsRecorder, 1, b.target) - b.addressList.reset() - b.updateBalancerState(balancer.State{ - ConnectivityState: connectivity.Idle, - Picker: &idlePicker{exitIdle: sync.OnceFunc(b.ExitIdle)}, - }) - return - } - - if b.firstPass { - switch newState.ConnectivityState { - case connectivity.Connecting: - // The effective state can be in either IDLE, CONNECTING or - // TRANSIENT_FAILURE. If it's TRANSIENT_FAILURE, stay in - // TRANSIENT_FAILURE until it's READY. See A62. - if sd.effectiveState != connectivity.TransientFailure { - sd.effectiveState = connectivity.Connecting - b.updateBalancerState(balancer.State{ - ConnectivityState: connectivity.Connecting, - Picker: &picker{err: balancer.ErrNoSubConnAvailable}, - }) - } - case connectivity.TransientFailure: - sd.lastErr = newState.ConnectionError - sd.effectiveState = connectivity.TransientFailure - // Since we're re-using common SubConns while handling resolver - // updates, we could receive an out of turn TRANSIENT_FAILURE from - // a pass over the previous address list. Happy Eyeballs will also - // cause out of order updates to arrive. - - if curAddr := b.addressList.currentAddress(); equalAddressIgnoringBalAttributes(&curAddr, &sd.addr) { - b.cancelConnectionTimer() - if b.addressList.increment() { - b.requestConnectionLocked() - return - } - } - - // End the first pass if we've seen a TRANSIENT_FAILURE from all - // SubConns once. - b.endFirstPassIfPossibleLocked(newState.ConnectionError) - } - return - } - - // We have finished the first pass, keep re-connecting failing SubConns. 
- switch newState.ConnectivityState { - case connectivity.TransientFailure: - b.numTF = (b.numTF + 1) % b.subConns.Len() - sd.lastErr = newState.ConnectionError - if b.numTF%b.subConns.Len() == 0 { - b.updateBalancerState(balancer.State{ - ConnectivityState: connectivity.TransientFailure, - Picker: &picker{err: newState.ConnectionError}, - }) - } - // We don't need to request re-resolution since the SubConn already - // does that before reporting TRANSIENT_FAILURE. - // TODO: #7534 - Move re-resolution requests from SubConn into - // pick_first. - case connectivity.Idle: - sd.subConn.Connect() - } -} - -// endFirstPassIfPossibleLocked ends the first happy-eyeballs pass if all the -// addresses are tried and their SubConns have reported a failure. -func (b *pickfirstBalancer) endFirstPassIfPossibleLocked(lastErr error) { - // An optimization to avoid iterating over the entire SubConn map. - if b.addressList.isValid() { - return - } - // Connect() has been called on all the SubConns. The first pass can be - // ended if all the SubConns have reported a failure. - for _, sd := range b.subConns.Values() { - if !sd.connectionFailedInFirstPass { - return - } - } - b.firstPass = false - b.updateBalancerState(balancer.State{ - ConnectivityState: connectivity.TransientFailure, - Picker: &picker{err: lastErr}, - }) - // Start re-connecting all the SubConns that are already in IDLE. - for _, sd := range b.subConns.Values() { - if sd.rawConnectivityState == connectivity.Idle { - sd.subConn.Connect() - } - } -} - -func (b *pickfirstBalancer) isActiveSCData(sd *scData) bool { - activeSD, found := b.subConns.Get(sd.addr) - return found && activeSD == sd -} - -func (b *pickfirstBalancer) updateSubConnHealthState(sd *scData, state balancer.SubConnState) { - b.mu.Lock() - defer b.mu.Unlock() - // Previously relevant SubConns can still callback with state updates. 
- // To prevent pickers from returning these obsolete SubConns, this logic - // is included to check if the current list of active SubConns includes - // this SubConn. - if !b.isActiveSCData(sd) { - return - } - sd.effectiveState = state.ConnectivityState - switch state.ConnectivityState { - case connectivity.Ready: - b.updateBalancerState(balancer.State{ - ConnectivityState: connectivity.Ready, - Picker: &picker{result: balancer.PickResult{SubConn: sd.subConn}}, - }) - case connectivity.TransientFailure: - b.updateBalancerState(balancer.State{ - ConnectivityState: connectivity.TransientFailure, - Picker: &picker{err: fmt.Errorf("pickfirst: health check failure: %v", state.ConnectionError)}, - }) - case connectivity.Connecting: - b.updateBalancerState(balancer.State{ - ConnectivityState: connectivity.Connecting, - Picker: &picker{err: balancer.ErrNoSubConnAvailable}, - }) - default: - b.logger.Errorf("Got unexpected health update for SubConn %p: %v", state) - } -} - -// updateBalancerState stores the state reported to the channel and calls -// ClientConn.UpdateState(). As an optimization, it avoids sending duplicate -// updates to the channel. -func (b *pickfirstBalancer) updateBalancerState(newState balancer.State) { - // In case of TransientFailures allow the picker to be updated to update - // the connectivity error, in all other cases don't send duplicate state - // updates. - if newState.ConnectivityState == b.state && b.state != connectivity.TransientFailure { - return - } - b.forceUpdateConcludedStateLocked(newState) -} - -// forceUpdateConcludedStateLocked stores the state reported to the channel and -// calls ClientConn.UpdateState(). -// A separate function is defined to force update the ClientConn state since the -// channel doesn't correctly assume that LB policies start in CONNECTING and -// relies on LB policy to send an initial CONNECTING update. 
-func (b *pickfirstBalancer) forceUpdateConcludedStateLocked(newState balancer.State) { - b.state = newState.ConnectivityState - b.cc.UpdateState(newState) -} - -type picker struct { - result balancer.PickResult - err error -} - -func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) { - return p.result, p.err -} - -// idlePicker is used when the SubConn is IDLE and kicks the SubConn into -// CONNECTING when Pick is called. -type idlePicker struct { - exitIdle func() -} - -func (i *idlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { - i.exitIdle() - return balancer.PickResult{}, balancer.ErrNoSubConnAvailable -} - -// addressList manages sequentially iterating over addresses present in a list -// of endpoints. It provides a 1 dimensional view of the addresses present in -// the endpoints. -// This type is not safe for concurrent access. -type addressList struct { - addresses []resolver.Address - idx int -} - -func (al *addressList) isValid() bool { - return al.idx < len(al.addresses) -} - -func (al *addressList) size() int { - return len(al.addresses) -} - -// increment moves to the next index in the address list. -// This method returns false if it went off the list, true otherwise. -func (al *addressList) increment() bool { - if !al.isValid() { - return false - } - al.idx++ - return al.idx < len(al.addresses) -} - -// currentAddress returns the current address pointed to in the addressList. -// If the list is in an invalid state, it returns an empty address instead. -func (al *addressList) currentAddress() resolver.Address { - if !al.isValid() { - return resolver.Address{} - } - return al.addresses[al.idx] -} - -func (al *addressList) reset() { - al.idx = 0 -} - -func (al *addressList) updateAddrs(addrs []resolver.Address) { - al.addresses = addrs - al.reset() -} - -// seekTo returns false if the needle was not found and the current index was -// left unchanged. 
-func (al *addressList) seekTo(needle resolver.Address) bool { - for ai, addr := range al.addresses { - if !equalAddressIgnoringBalAttributes(&addr, &needle) { - continue - } - al.idx = ai - return true - } - return false -} - -// hasNext returns whether incrementing the addressList will result in moving -// past the end of the list. If the list has already moved past the end, it -// returns false. -func (al *addressList) hasNext() bool { - if !al.isValid() { - return false - } - return al.idx+1 < len(al.addresses) -} - -// equalAddressIgnoringBalAttributes returns true is a and b are considered -// equal. This is different from the Equal method on the resolver.Address type -// which considers all fields to determine equality. Here, we only consider -// fields that are meaningful to the SubConn. -func equalAddressIgnoringBalAttributes(a, b *resolver.Address) bool { - return a.Addr == b.Addr && a.ServerName == b.ServerName && - a.Attributes.Equal(b.Attributes) -} diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go index 22045bf3946..22e6e326794 100644 --- a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go +++ b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go @@ -26,7 +26,7 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/endpointsharding" - "google.golang.org/grpc/balancer/pickfirst/pickfirstleaf" + "google.golang.org/grpc/balancer/pickfirst" "google.golang.org/grpc/grpclog" internalgrpclog "google.golang.org/grpc/internal/grpclog" ) @@ -47,7 +47,7 @@ func (bb builder) Name() string { } func (bb builder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { - childBuilder := balancer.Get(pickfirstleaf.Name).Build + childBuilder := balancer.Get(pickfirst.Name).Build bal := &rrBalancer{ cc: cc, Balancer: endpointsharding.NewBalancer(cc, opts, childBuilder, endpointsharding.Options{}), @@ -67,6 
+67,6 @@ func (b *rrBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error { return b.Balancer.UpdateClientConnState(balancer.ClientConnState{ // Enable the health listener in pickfirst children for client side health // checks and outlier detection, if configured. - ResolverState: pickfirstleaf.EnableHealthListener(ccs.ResolverState), + ResolverState: pickfirst.EnableHealthListener(ccs.ResolverState), }) } diff --git a/vendor/google.golang.org/grpc/balancer_wrapper.go b/vendor/google.golang.org/grpc/balancer_wrapper.go index 948a21ef683..2c760e623f6 100644 --- a/vendor/google.golang.org/grpc/balancer_wrapper.go +++ b/vendor/google.golang.org/grpc/balancer_wrapper.go @@ -450,13 +450,14 @@ func (acbw *acBalancerWrapper) healthListenerRegFn() func(context.Context, func( if acbw.ccb.cc.dopts.disableHealthCheck { return noOpRegisterHealthListenerFn } + cfg := acbw.ac.cc.healthCheckConfig() + if cfg == nil { + return noOpRegisterHealthListenerFn + } regHealthLisFn := internal.RegisterClientHealthCheckListener if regHealthLisFn == nil { // The health package is not imported. - return noOpRegisterHealthListenerFn - } - cfg := acbw.ac.cc.healthCheckConfig() - if cfg == nil { + channelz.Error(logger, acbw.ac.channelz, "Health check is requested but health package is not imported.") return noOpRegisterHealthListenerFn } return func(ctx context.Context, listener func(balancer.SubConnState)) func() { diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go index b1364a03252..42c61cf9fe5 100644 --- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.36.6 +// protoc-gen-go v1.36.10 // protoc v5.27.1 // source: grpc/binlog/v1/binarylog.proto diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index a3c315f2d76..c0c2c9a76ab 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -40,11 +40,12 @@ import ( "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/idle" iresolver "google.golang.org/grpc/internal/resolver" - "google.golang.org/grpc/internal/stats" + istats "google.golang.org/grpc/internal/stats" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" + "google.golang.org/grpc/stats" "google.golang.org/grpc/status" _ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin. @@ -210,7 +211,8 @@ func NewClient(target string, opts ...DialOption) (conn *ClientConn, err error) cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelz) cc.pickerWrapper = newPickerWrapper() - cc.metricsRecorderList = stats.NewMetricsRecorderList(cc.dopts.copts.StatsHandlers) + cc.metricsRecorderList = istats.NewMetricsRecorderList(cc.dopts.copts.StatsHandlers) + cc.statsHandler = istats.NewCombinedHandler(cc.dopts.copts.StatsHandlers...) cc.initIdleStateLocked() // Safe to call without the lock, since nothing else has a reference to cc. cc.idlenessMgr = idle.NewManager((*idler)(cc), cc.dopts.idleTimeout) @@ -621,7 +623,8 @@ type ClientConn struct { channelz *channelz.Channel // Channelz object. resolverBuilder resolver.Builder // See initParsedTargetAndResolverBuilder(). idlenessMgr *idle.Manager - metricsRecorderList *stats.MetricsRecorderList + metricsRecorderList *istats.MetricsRecorderList + statsHandler stats.Handler // The following provide their own synchronization, and therefore don't // require cc.mu to be held to access them. 
diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go index c8e337cdda0..06f6c6c70a9 100644 --- a/vendor/google.golang.org/grpc/credentials/credentials.go +++ b/vendor/google.golang.org/grpc/credentials/credentials.go @@ -44,8 +44,7 @@ type PerRPCCredentials interface { // A54). uri is the URI of the entry point for the request. When supported // by the underlying implementation, ctx can be used for timeout and // cancellation. Additionally, RequestInfo data will be available via ctx - // to this call. TODO(zhaoq): Define the set of the qualified keys instead - // of leaving it as an arbitrary string. + // to this call. GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) // RequireTransportSecurity indicates whether the credentials requires // transport security. diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go index 11d0ae142c4..dadd21e40f9 100644 --- a/vendor/google.golang.org/grpc/encoding/encoding.go +++ b/vendor/google.golang.org/grpc/encoding/encoding.go @@ -27,8 +27,10 @@ package encoding import ( "io" + "slices" "strings" + "google.golang.org/grpc/encoding/internal" "google.golang.org/grpc/internal/grpcutil" ) @@ -36,6 +38,24 @@ import ( // It is intended for grpc internal use only. const Identity = "identity" +func init() { + internal.RegisterCompressorForTesting = func(c Compressor) func() { + name := c.Name() + curCompressor, found := registeredCompressor[name] + RegisterCompressor(c) + return func() { + if found { + registeredCompressor[name] = curCompressor + return + } + delete(registeredCompressor, name) + grpcutil.RegisteredCompressorNames = slices.DeleteFunc(grpcutil.RegisteredCompressorNames, func(s string) bool { + return s == name + }) + } + } +} + // Compressor is used for compressing and decompressing when sending or // receiving messages. 
// diff --git a/vendor/google.golang.org/grpc/encoding/internal/internal.go b/vendor/google.golang.org/grpc/encoding/internal/internal.go new file mode 100644 index 00000000000..ee9acb43779 --- /dev/null +++ b/vendor/google.golang.org/grpc/encoding/internal/internal.go @@ -0,0 +1,28 @@ +/* + * + * Copyright 2025 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package internal contains code internal to the encoding package. +package internal + +// RegisterCompressorForTesting registers a compressor in the global compressor +// registry. It returns a cleanup function that should be called at the end +// of the test to unregister the compressor. +// +// This prevents compressors registered in one test from appearing in the +// encoding headers of subsequent tests. +var RegisterCompressorForTesting any // func RegisterCompressor(c Compressor) func() diff --git a/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go b/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go index ad75313a18e..2b57ba65a39 100644 --- a/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go +++ b/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go @@ -75,6 +75,7 @@ const ( MetricTypeIntHisto MetricTypeFloatHisto MetricTypeIntGauge + MetricTypeIntUpDownCount ) // Int64CountHandle is a typed handle for a int count metric. 
This handle @@ -93,6 +94,23 @@ func (h *Int64CountHandle) Record(recorder MetricsRecorder, incr int64, labels . recorder.RecordInt64Count(h, incr, labels...) } +// Int64UpDownCountHandle is a typed handle for an int up-down counter metric. +// This handle is passed at the recording point in order to know which metric +// to record on. +type Int64UpDownCountHandle MetricDescriptor + +// Descriptor returns the int64 up-down counter handle typecast to a pointer to a +// MetricDescriptor. +func (h *Int64UpDownCountHandle) Descriptor() *MetricDescriptor { + return (*MetricDescriptor)(h) +} + +// Record records the int64 up-down counter value on the metrics recorder provided. +// The value 'v' can be positive to increment or negative to decrement. +func (h *Int64UpDownCountHandle) Record(recorder MetricsRecorder, v int64, labels ...string) { + recorder.RecordInt64UpDownCount(h, v, labels...) +} + // Float64CountHandle is a typed handle for a float count metric. This handle is // passed at the recording point in order to know which metric to record on. type Float64CountHandle MetricDescriptor @@ -249,6 +267,21 @@ func RegisterInt64Gauge(descriptor MetricDescriptor) *Int64GaugeHandle { return (*Int64GaugeHandle)(descPtr) } +// RegisterInt64UpDownCount registers the metric description onto the global registry. +// It returns a typed handle to use for recording data. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple metrics are +// registered with the same name, this function will panic. 
+func RegisterInt64UpDownCount(descriptor MetricDescriptor) *Int64UpDownCountHandle { + registerMetric(descriptor.Name, descriptor.Default) + // Set the specific metric type for the up-down counter + descriptor.Type = MetricTypeIntUpDownCount + descPtr := &descriptor + metricsRegistry[descriptor.Name] = descPtr + return (*Int64UpDownCountHandle)(descPtr) +} + // snapshotMetricsRegistryForTesting snapshots the global data of the metrics // registry. Returns a cleanup function that sets the metrics registry to its // original state. diff --git a/vendor/google.golang.org/grpc/experimental/stats/metrics.go b/vendor/google.golang.org/grpc/experimental/stats/metrics.go index ee1423605ab..cb57f1a748b 100644 --- a/vendor/google.golang.org/grpc/experimental/stats/metrics.go +++ b/vendor/google.golang.org/grpc/experimental/stats/metrics.go @@ -38,6 +38,9 @@ type MetricsRecorder interface { // RecordInt64Gauge records the measurement alongside labels on the int // gauge associated with the provided handle. RecordInt64Gauge(handle *Int64GaugeHandle, incr int64, labels ...string) + // RecordInt64UpDownCounter records the measurement alongside labels on the int + // count associated with the provided handle. + RecordInt64UpDownCount(handle *Int64UpDownCountHandle, incr int64, labels ...string) } // Metrics is an experimental legacy alias of the now-stable stats.MetricSet. diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go index 22d263fb94b..8f7d9f6bbe6 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.36.6 +// protoc-gen-go v1.36.10 // protoc v5.27.1 // source: grpc/health/v1/health.proto diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index 7e060f5ed13..91f760936c0 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -52,12 +52,6 @@ var ( // or "false". EnforceALPNEnabled = boolFromEnv("GRPC_ENFORCE_ALPN_ENABLED", true) - // NewPickFirstEnabled is set if the new pickfirst leaf policy is to be used - // instead of the exiting pickfirst implementation. This can be disabled by - // setting the environment variable "GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST" - // to "false". - NewPickFirstEnabled = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST", true) - // XDSEndpointHashKeyBackwardCompat controls the parsing of the endpoint hash // key from EDS LbEndpoint metadata. Endpoint hash keys can be disabled by // setting "GRPC_XDS_ENDPOINT_HASH_KEY_BACKWARD_COMPAT" to "true". When the @@ -75,6 +69,14 @@ var ( // ALTSHandshakerKeepaliveParams is set if we should add the // KeepaliveParams when dial the ALTS handshaker service. ALTSHandshakerKeepaliveParams = boolFromEnv("GRPC_EXPERIMENTAL_ALTS_HANDSHAKER_KEEPALIVE_PARAMS", false) + + // EnableDefaultPortForProxyTarget controls whether the resolver adds a default port 443 + // to a target address that lacks one. This flag only has an effect when all of + // the following conditions are met: + // - A connect proxy is being used. + // - Target resolution is disabled. + // - The DNS resolver is being used. 
+ EnableDefaultPortForProxyTarget = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_DEFAULT_PORT_FOR_PROXY_TARGET", true) ) func boolFromEnv(envVar string, def bool) bool { diff --git a/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/vendor/google.golang.org/grpc/internal/envconfig/xds.go index b1f883bcac1..7685d08b54d 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/xds.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/xds.go @@ -74,4 +74,9 @@ var ( // For more details, see: // https://github.com/grpc/proposal/blob/master/A86-xds-http-connect.md XDSHTTPConnectEnabled = boolFromEnv("GRPC_EXPERIMENTAL_XDS_HTTP_CONNECT", false) + + // XDSBootstrapCallCredsEnabled controls if call credentials can be used in + // xDS bootstrap configuration via the `call_creds` field. For more details, + // see: https://github.com/grpc/proposal/blob/master/A97-xds-jwt-call-creds.md + XDSBootstrapCallCredsEnabled = boolFromEnv("GRPC_EXPERIMENTAL_XDS_BOOTSTRAP_CALL_CREDS", false) ) diff --git a/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go b/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go index 20b8fb098ac..5bfa67b7268 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go +++ b/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go @@ -22,11 +22,13 @@ package delegatingresolver import ( "fmt" + "net" "net/http" "net/url" "sync" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/proxyattributes" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/internal/transport/networktype" @@ -40,6 +42,8 @@ var ( HTTPSProxyFromEnvironment = http.ProxyFromEnvironment ) +const defaultPort = "443" + // delegatingResolver manages both target URI and proxy address resolution by // delegating these tasks to separate child resolvers. 
Essentially, it acts as // an intermediary between the gRPC ClientConn and the child resolvers. @@ -107,10 +111,18 @@ func New(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOpti targetResolver: nopResolver{}, } + addr := target.Endpoint() var err error - r.proxyURL, err = proxyURLForTarget(target.Endpoint()) + if target.URL.Scheme == "dns" && !targetResolutionEnabled && envconfig.EnableDefaultPortForProxyTarget { + addr, err = parseTarget(addr) + if err != nil { + return nil, fmt.Errorf("delegating_resolver: invalid target address %q: %v", target.Endpoint(), err) + } + } + + r.proxyURL, err = proxyURLForTarget(addr) if err != nil { - return nil, fmt.Errorf("delegating_resolver: failed to determine proxy URL for target %s: %v", target, err) + return nil, fmt.Errorf("delegating_resolver: failed to determine proxy URL for target %q: %v", target, err) } // proxy is not configured or proxy address excluded using `NO_PROXY` env @@ -132,8 +144,8 @@ func New(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOpti // bypass the target resolver and store the unresolved target address. if target.URL.Scheme == "dns" && !targetResolutionEnabled { r.targetResolverState = &resolver.State{ - Addresses: []resolver.Address{{Addr: target.Endpoint()}}, - Endpoints: []resolver.Endpoint{{Addresses: []resolver.Address{{Addr: target.Endpoint()}}}}, + Addresses: []resolver.Address{{Addr: addr}}, + Endpoints: []resolver.Endpoint{{Addresses: []resolver.Address{{Addr: addr}}}}, } r.updateTargetResolverState(*r.targetResolverState) return r, nil @@ -202,6 +214,44 @@ func needsProxyResolver(state *resolver.State) bool { return false } +// parseTarget takes a target string and ensures it is a valid "host:port" target. +// +// It does the following: +// 1. If the target already has a port (e.g., "host:port", "[ipv6]:port"), +// it is returned as is. +// 2. If the host part is empty (e.g., ":80"), it defaults to "localhost", +// returning "localhost:80". +// 3. 
If the target is missing a port (e.g., "host", "ipv6"), the defaultPort +// is added. +// +// An error is returned for empty targets or targets with a trailing colon +// but no port (e.g., "host:"). +func parseTarget(target string) (string, error) { + if target == "" { + return "", fmt.Errorf("missing address") + } + + host, port, err := net.SplitHostPort(target) + if err != nil { + // If SplitHostPort fails, it's likely because the port is missing. + // We append the default port and return the result. + return net.JoinHostPort(target, defaultPort), nil + } + + // If SplitHostPort succeeds, we check for edge cases. + if port == "" { + // A success with an empty port means the target had a trailing colon, + // e.g., "host:", which is an error. + return "", fmt.Errorf("missing port after port-separator colon") + } + if host == "" { + // A success with an empty host means the target was like ":80". + // We default the host to "localhost". + host = "localhost" + } + return net.JoinHostPort(host, port), nil +} + func skipProxy(address resolver.Address) bool { // Avoid proxy when network is not tcp. networkType, ok := networktype.Get(address) diff --git a/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go b/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go index 79044657be1..d5f7e4d62dd 100644 --- a/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go +++ b/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go @@ -64,6 +64,16 @@ func (l *MetricsRecorderList) RecordInt64Count(handle *estats.Int64CountHandle, } } +// RecordInt64UpDownCount records the measurement alongside labels on the int +// count associated with the provided handle. +func (l *MetricsRecorderList) RecordInt64UpDownCount(handle *estats.Int64UpDownCountHandle, incr int64, labels ...string) { + verifyLabels(handle.Descriptor(), labels...) 
+ + for _, metricRecorder := range l.metricsRecorders { + metricRecorder.RecordInt64UpDownCount(handle, incr, labels...) + } +} + // RecordFloat64Count records the measurement alongside labels on the float // count associated with the provided handle. func (l *MetricsRecorderList) RecordFloat64Count(handle *estats.Float64CountHandle, incr float64, labels ...string) { diff --git a/vendor/google.golang.org/grpc/internal/stats/stats.go b/vendor/google.golang.org/grpc/internal/stats/stats.go new file mode 100644 index 00000000000..49019b80d15 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/stats/stats.go @@ -0,0 +1,70 @@ +/* + * + * Copyright 2025 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package stats + +import ( + "context" + + "google.golang.org/grpc/stats" +) + +type combinedHandler struct { + handlers []stats.Handler +} + +// NewCombinedHandler combines multiple stats.Handlers into a single handler. +// +// It returns nil if no handlers are provided. If only one handler is +// provided, it is returned directly without wrapping. 
+func NewCombinedHandler(handlers ...stats.Handler) stats.Handler { + switch len(handlers) { + case 0: + return nil + case 1: + return handlers[0] + default: + return &combinedHandler{handlers: handlers} + } +} + +func (ch *combinedHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context { + for _, h := range ch.handlers { + ctx = h.TagRPC(ctx, info) + } + return ctx +} + +func (ch *combinedHandler) HandleRPC(ctx context.Context, stats stats.RPCStats) { + for _, h := range ch.handlers { + h.HandleRPC(ctx, stats) + } +} + +func (ch *combinedHandler) TagConn(ctx context.Context, info *stats.ConnTagInfo) context.Context { + for _, h := range ch.handlers { + ctx = h.TagConn(ctx, info) + } + return ctx +} + +func (ch *combinedHandler) HandleConn(ctx context.Context, stats stats.ConnStats) { + for _, h := range ch.handlers { + h.HandleConn(ctx, stats) + } +} diff --git a/vendor/google.golang.org/grpc/internal/transport/client_stream.go b/vendor/google.golang.org/grpc/internal/transport/client_stream.go index ccc0e017e5e..980452519ea 100644 --- a/vendor/google.golang.org/grpc/internal/transport/client_stream.go +++ b/vendor/google.golang.org/grpc/internal/transport/client_stream.go @@ -29,25 +29,27 @@ import ( // ClientStream implements streaming functionality for a gRPC client. type ClientStream struct { - *Stream // Embed for common stream functionality. + Stream // Embed for common stream functionality. ct *http2Client done chan struct{} // closed at the end of stream to unblock writers. doneFunc func() // invoked at the end of stream. - headerChan chan struct{} // closed to indicate the end of header metadata. - headerChanClosed uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times. + headerChan chan struct{} // closed to indicate the end of header metadata. 
+ header metadata.MD // the received header metadata + + status *status.Status // the status error received from the server + + // Non-pointer fields are at the end to optimize GC allocations. + // headerValid indicates whether a valid header was received. Only // meaningful after headerChan is closed (always call waitOnHeader() before // reading its value). - headerValid bool - header metadata.MD // the received header metadata - noHeaders bool // set if the client never received headers (set only after the stream is done). - - bytesReceived atomic.Bool // indicates whether any bytes have been received on this stream - unprocessed atomic.Bool // set if the server sends a refused stream or GOAWAY including this stream - - status *status.Status // the status error received from the server + headerValid bool + noHeaders bool // set if the client never received headers (set only after the stream is done). + headerChanClosed uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times. + bytesReceived atomic.Bool // indicates whether any bytes have been received on this stream + unprocessed atomic.Bool // set if the server sends a refused stream or GOAWAY including this stream } // Read reads an n byte message from the input stream. 
@@ -142,3 +144,11 @@ func (s *ClientStream) TrailersOnly() bool { func (s *ClientStream) Status() *status.Status { return s.status } + +func (s *ClientStream) requestRead(n int) { + s.ct.adjustWindow(s, uint32(n)) +} + +func (s *ClientStream) updateWindow(n int) { + s.ct.updateWindow(s, uint32(n)) +} diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go index a2831e5d01f..2dcd1e63bdd 100644 --- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go +++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -496,6 +496,16 @@ const ( serverSide ) +// maxWriteBufSize is the maximum length (number of elements) the cached +// writeBuf can grow to. The length depends on the number of buffers +// contained within the BufferSlice produced by the codec, which is +// generally small. +// +// If a writeBuf larger than this limit is required, it will be allocated +// and freed after use, rather than being cached. This avoids holding +// on to large amounts of memory. +const maxWriteBufSize = 64 + // Loopy receives frames from the control buffer. // Each frame is handled individually; most of the work done by loopy goes // into handling data frames. Loopy maintains a queue of active streams, and each @@ -530,6 +540,8 @@ type loopyWriter struct { // Side-specific handlers ssGoAwayHandler func(*goAway) (bool, error) + + writeBuf [][]byte // cached slice to avoid heap allocations for calls to mem.Reader.Peek. 
} func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger, goAwayHandler func(*goAway) (bool, error), bufferPool mem.BufferPool) *loopyWriter { @@ -665,11 +677,10 @@ func (l *loopyWriter) incomingSettingsHandler(s *incomingSettings) error { func (l *loopyWriter) registerStreamHandler(h *registerStream) { str := &outStream{ - id: h.streamID, - state: empty, - itl: &itemList{}, - wq: h.wq, - reader: mem.BufferSlice{}.Reader(), + id: h.streamID, + state: empty, + itl: &itemList{}, + wq: h.wq, } l.estdStreams[h.streamID] = str } @@ -701,11 +712,10 @@ func (l *loopyWriter) headerHandler(h *headerFrame) error { } // Case 2: Client wants to originate stream. str := &outStream{ - id: h.streamID, - state: empty, - itl: &itemList{}, - wq: h.wq, - reader: mem.BufferSlice{}.Reader(), + id: h.streamID, + state: empty, + itl: &itemList{}, + wq: h.wq, } return l.originateStream(str, h) } @@ -948,11 +958,11 @@ func (l *loopyWriter) processData() (bool, error) { if str == nil { return true, nil } - reader := str.reader + reader := &str.reader dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item this stream. 
if !dataItem.processing { dataItem.processing = true - str.reader.Reset(dataItem.data) + reader.Reset(dataItem.data) dataItem.data.Free() } // A data item is represented by a dataFrame, since it later translates into @@ -964,11 +974,11 @@ func (l *loopyWriter) processData() (bool, error) { if len(dataItem.h) == 0 && reader.Remaining() == 0 { // Empty data frame // Client sends out empty data frame with endStream = true - if err := l.framer.fr.WriteData(dataItem.streamID, dataItem.endStream, nil); err != nil { + if err := l.framer.writeData(dataItem.streamID, dataItem.endStream, nil); err != nil { return false, err } str.itl.dequeue() // remove the empty data item from stream - _ = reader.Close() + reader.Close() if str.itl.isEmpty() { str.state = empty } else if trailer, ok := str.itl.peek().(*headerFrame); ok { // the next item is trailers. @@ -1001,25 +1011,20 @@ func (l *loopyWriter) processData() (bool, error) { remainingBytes := len(dataItem.h) + reader.Remaining() - hSize - dSize size := hSize + dSize - var buf *[]byte - - if hSize != 0 && dSize == 0 { - buf = &dataItem.h - } else { - // Note: this is only necessary because the http2.Framer does not support - // partially writing a frame, so the sequence must be materialized into a buffer. - // TODO: Revisit once https://github.com/golang/go/issues/66655 is addressed. - pool := l.bufferPool - if pool == nil { - // Note that this is only supposed to be nil in tests. Otherwise, stream is - // always initialized with a BufferPool. - pool = mem.DefaultBufferPool() + l.writeBuf = l.writeBuf[:0] + if hSize > 0 { + l.writeBuf = append(l.writeBuf, dataItem.h[:hSize]) + } + if dSize > 0 { + var err error + l.writeBuf, err = reader.Peek(dSize, l.writeBuf) + if err != nil { + // This must never happen since the reader must have at least dSize + // bytes. + // Log an error to fail tests. 
+ l.logger.Errorf("unexpected error while reading Data frame payload: %v", err) + return false, err } - buf = pool.Get(size) - defer pool.Put(buf) - - copy((*buf)[:hSize], dataItem.h) - _, _ = reader.Read((*buf)[hSize:]) } // Now that outgoing flow controls are checked we can replenish str's write quota @@ -1032,7 +1037,14 @@ func (l *loopyWriter) processData() (bool, error) { if dataItem.onEachWrite != nil { dataItem.onEachWrite() } - if err := l.framer.fr.WriteData(dataItem.streamID, endStream, (*buf)[:size]); err != nil { + err := l.framer.writeData(dataItem.streamID, endStream, l.writeBuf) + reader.Discard(dSize) + if cap(l.writeBuf) > maxWriteBufSize { + l.writeBuf = nil + } else { + clear(l.writeBuf) + } + if err != nil { return false, err } str.bytesOutStanding += size @@ -1040,7 +1052,7 @@ func (l *loopyWriter) processData() (bool, error) { dataItem.h = dataItem.h[hSize:] if remainingBytes == 0 { // All the data from that message was written out. - _ = reader.Close() + reader.Close() str.itl.dequeue() } if str.itl.isEmpty() { diff --git a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go index dfc0f224ec8..7cfbc9637b8 100644 --- a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go +++ b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go @@ -28,7 +28,7 @@ import ( // writeQuota is a soft limit on the amount of data a stream can // schedule before some of it is written out. type writeQuota struct { - quota int32 + _ noCopy // get waits on read from when quota goes less than or equal to zero. // replenish writes on it when quota goes positive again. ch chan struct{} @@ -38,16 +38,17 @@ type writeQuota struct { // It is implemented as a field so that it can be updated // by tests. 
replenish func(n int) + quota int32 } -func newWriteQuota(sz int32, done <-chan struct{}) *writeQuota { - w := &writeQuota{ - quota: sz, - ch: make(chan struct{}, 1), - done: done, - } +// init allows a writeQuota to be initialized in-place, which is useful for +// resetting a buffer or for avoiding a heap allocation when the buffer is +// embedded in another struct. +func (w *writeQuota) init(sz int32, done <-chan struct{}) { + w.quota = sz + w.ch = make(chan struct{}, 1) + w.done = done w.replenish = w.realReplenish - return w } func (w *writeQuota) get(sz int32) error { @@ -67,9 +68,9 @@ func (w *writeQuota) get(sz int32) error { func (w *writeQuota) realReplenish(n int) { sz := int32(n) - a := atomic.AddInt32(&w.quota, sz) - b := a - sz - if b <= 0 && a > 0 { + newQuota := atomic.AddInt32(&w.quota, sz) + previousQuota := newQuota - sz + if previousQuota <= 0 && newQuota > 0 { select { case w.ch <- struct{}{}: default: diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go index d954a64c38f..7ab3422b8a2 100644 --- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -50,7 +50,7 @@ import ( // NewServerHandlerTransport returns a ServerTransport handling gRPC from // inside an http.Handler, or writes an HTTP error to w and returns an error. // It requires that the http Server supports HTTP/2. 
-func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler, bufferPool mem.BufferPool) (ServerTransport, error) { +func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats stats.Handler, bufferPool mem.BufferPool) (ServerTransport, error) { if r.Method != http.MethodPost { w.Header().Set("Allow", http.MethodPost) msg := fmt.Sprintf("invalid gRPC request method %q", r.Method) @@ -170,7 +170,7 @@ type serverHandlerTransport struct { // TODO make sure this is consistent across handler_server and http2_server contentSubtype string - stats []stats.Handler + stats stats.Handler logger *grpclog.PrefixLogger bufferPool mem.BufferPool @@ -274,15 +274,13 @@ func (ht *serverHandlerTransport) writeStatus(s *ServerStream, st *status.Status } }) - if err == nil { // transport has not been closed + if err == nil && ht.stats != nil { // transport has not been closed // Note: The trailer fields are compressed with hpack after this call returns. // No WireLength field is set here. s.hdrMu.Lock() - for _, sh := range ht.stats { - sh.HandleRPC(s.Context(), &stats.OutTrailer{ - Trailer: s.trailer.Copy(), - }) - } + ht.stats.HandleRPC(s.Context(), &stats.OutTrailer{ + Trailer: s.trailer.Copy(), + }) s.hdrMu.Unlock() } ht.Close(errors.New("finished writing status")) @@ -374,19 +372,23 @@ func (ht *serverHandlerTransport) writeHeader(s *ServerStream, md metadata.MD) e ht.rw.(http.Flusher).Flush() }) - if err == nil { - for _, sh := range ht.stats { - // Note: The header fields are compressed with hpack after this call returns. - // No WireLength field is set here. - sh.HandleRPC(s.Context(), &stats.OutHeader{ - Header: md.Copy(), - Compression: s.sendCompress, - }) - } + if err == nil && ht.stats != nil { + // Note: The header fields are compressed with hpack after this call returns. + // No WireLength field is set here. 
+ ht.stats.HandleRPC(s.Context(), &stats.OutHeader{ + Header: md.Copy(), + Compression: s.sendCompress, + }) } return err } +func (ht *serverHandlerTransport) adjustWindow(*ServerStream, uint32) { +} + +func (ht *serverHandlerTransport) updateWindow(*ServerStream, uint32) { +} + func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream func(*ServerStream)) { // With this transport type there will be exactly 1 stream: this HTTP request. var cancel context.CancelFunc @@ -411,11 +413,9 @@ func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream ctx = metadata.NewIncomingContext(ctx, ht.headerMD) req := ht.req s := &ServerStream{ - Stream: &Stream{ + Stream: Stream{ id: 0, // irrelevant ctx: ctx, - requestRead: func(int) {}, - buf: newRecvBuffer(), method: req.URL.Path, recvCompress: req.Header.Get("grpc-encoding"), contentSubtype: ht.contentSubtype, @@ -424,9 +424,11 @@ func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream st: ht, headerWireLength: 0, // won't have access to header wire length until golang/go#18997. } - s.trReader = &transportReader{ - reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf}, - windowHandler: func(int) {}, + s.Stream.buf.init() + s.readRequester = s + s.trReader = transportReader{ + reader: recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: &s.buf}, + windowHandler: s, } // readerDone is closed when the Body.Read-ing goroutine exits. 
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index 7cb238794fb..65b4ab2439e 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -44,6 +44,7 @@ import ( "google.golang.org/grpc/internal/grpcutil" imetadata "google.golang.org/grpc/internal/metadata" "google.golang.org/grpc/internal/proxyattributes" + istats "google.golang.org/grpc/internal/stats" istatus "google.golang.org/grpc/internal/status" isyscall "google.golang.org/grpc/internal/syscall" "google.golang.org/grpc/internal/transport/networktype" @@ -105,7 +106,7 @@ type http2Client struct { kp keepalive.ClientParameters keepaliveEnabled bool - statsHandlers []stats.Handler + statsHandler stats.Handler initialWindowSize int32 @@ -335,14 +336,14 @@ func NewHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts writerDone: make(chan struct{}), goAway: make(chan struct{}), keepaliveDone: make(chan struct{}), - framer: newFramer(conn, writeBufSize, readBufSize, opts.SharedWriteBuffer, maxHeaderListSize), + framer: newFramer(conn, writeBufSize, readBufSize, opts.SharedWriteBuffer, maxHeaderListSize, opts.BufferPool), fc: &trInFlow{limit: uint32(icwz)}, scheme: scheme, activeStreams: make(map[uint32]*ClientStream), isSecure: isSecure, perRPCCreds: perRPCCreds, kp: kp, - statsHandlers: opts.StatsHandlers, + statsHandler: istats.NewCombinedHandler(opts.StatsHandlers...), initialWindowSize: initialWindowSize, nextID: 1, maxConcurrentStreams: defaultMaxStreamsClient, @@ -386,15 +387,14 @@ func NewHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts updateFlowControl: t.updateFlowControl, } } - for _, sh := range t.statsHandlers { - t.ctx = sh.TagConn(t.ctx, &stats.ConnTagInfo{ + if t.statsHandler != nil { + t.ctx = t.statsHandler.TagConn(t.ctx, &stats.ConnTagInfo{ RemoteAddr: t.remoteAddr, 
LocalAddr: t.localAddr, }) - connBegin := &stats.ConnBegin{ + t.statsHandler.HandleConn(t.ctx, &stats.ConnBegin{ Client: true, - } - sh.HandleConn(t.ctx, connBegin) + }) } if t.keepaliveEnabled { t.kpDormancyCond = sync.NewCond(&t.mu) @@ -481,10 +481,9 @@ func NewHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *ClientStream { // TODO(zhaoq): Handle uint32 overflow of Stream.id. s := &ClientStream{ - Stream: &Stream{ + Stream: Stream{ method: callHdr.Method, sendCompress: callHdr.SendCompress, - buf: newRecvBuffer(), contentSubtype: callHdr.ContentSubtype, }, ct: t, @@ -492,26 +491,21 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *ClientSt headerChan: make(chan struct{}), doneFunc: callHdr.DoneFunc, } - s.wq = newWriteQuota(defaultWriteQuota, s.done) - s.requestRead = func(n int) { - t.adjustWindow(s, uint32(n)) - } + s.Stream.buf.init() + s.Stream.wq.init(defaultWriteQuota, s.done) + s.readRequester = s // The client side stream context should have exactly the same life cycle with the user provided context. // That means, s.ctx should be read-only. And s.ctx is done iff ctx is done. // So we use the original context here instead of creating a copy. 
s.ctx = ctx - s.trReader = &transportReader{ - reader: &recvBufferReader{ - ctx: s.ctx, - ctxDone: s.ctx.Done(), - recv: s.buf, - closeStream: func(err error) { - s.Close(err) - }, - }, - windowHandler: func(n int) { - t.updateWindow(s, uint32(n)) + s.trReader = transportReader{ + reader: recvBufferReader{ + ctx: s.ctx, + ctxDone: s.ctx.Done(), + recv: &s.buf, + clientStream: s, }, + windowHandler: s, } return s } @@ -823,7 +817,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*ClientS return nil }, onOrphaned: cleanup, - wq: s.wq, + wq: &s.wq, } firstTry := true var ch chan struct{} @@ -854,7 +848,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*ClientS transportDrainRequired = t.nextID > MaxStreamID s.id = hdr.streamID - s.fc = &inFlow{limit: uint32(t.initialWindowSize)} + s.fc = inFlow{limit: uint32(t.initialWindowSize)} t.activeStreams[s.id] = s t.mu.Unlock() @@ -905,27 +899,23 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*ClientS return nil, &NewStreamError{Err: ErrConnClosing, AllowTransparentRetry: true} } } - if len(t.statsHandlers) != 0 { + if t.statsHandler != nil { header, ok := metadata.FromOutgoingContext(ctx) if ok { header.Set("user-agent", t.userAgent) } else { header = metadata.Pairs("user-agent", t.userAgent) } - for _, sh := range t.statsHandlers { - // Note: The header fields are compressed with hpack after this call returns. - // No WireLength field is set here. - // Note: Creating a new stats object to prevent pollution. - outHeader := &stats.OutHeader{ - Client: true, - FullMethod: callHdr.Method, - RemoteAddr: t.remoteAddr, - LocalAddr: t.localAddr, - Compression: callHdr.SendCompress, - Header: header, - } - sh.HandleRPC(s.ctx, outHeader) - } + // Note: The header fields are compressed with hpack after this call returns. + // No WireLength field is set here. 
+ t.statsHandler.HandleRPC(s.ctx, &stats.OutHeader{ + Client: true, + FullMethod: callHdr.Method, + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + Compression: callHdr.SendCompress, + Header: header, + }) } if transportDrainRequired { if t.logger.V(logLevel) { @@ -1002,6 +992,9 @@ func (t *http2Client) closeStream(s *ClientStream, err error, rst bool, rstCode // accessed anymore. func (t *http2Client) Close(err error) { t.conn.SetWriteDeadline(time.Now().Add(time.Second * 10)) + // For background on the deadline value chosen here, see + // https://github.com/grpc/grpc-go/issues/8425#issuecomment-3057938248 . + t.conn.SetReadDeadline(time.Now().Add(time.Second)) t.mu.Lock() // Make sure we only close once. if t.state == closing { @@ -1063,11 +1056,10 @@ func (t *http2Client) Close(err error) { for _, s := range streams { t.closeStream(s, err, false, http2.ErrCodeNo, st, nil, false) } - for _, sh := range t.statsHandlers { - connEnd := &stats.ConnEnd{ + if t.statsHandler != nil { + t.statsHandler.HandleConn(t.ctx, &stats.ConnEnd{ Client: true, - } - sh.HandleConn(t.ctx, connEnd) + }) } } @@ -1178,7 +1170,7 @@ func (t *http2Client) updateFlowControl(n uint32) { }) } -func (t *http2Client) handleData(f *http2.DataFrame) { +func (t *http2Client) handleData(f *parsedDataFrame) { size := f.Header().Length var sendBDPPing bool if t.bdpEst != nil { @@ -1222,22 +1214,15 @@ func (t *http2Client) handleData(f *http2.DataFrame) { t.closeStream(s, io.EOF, true, http2.ErrCodeFlowControl, status.New(codes.Internal, err.Error()), nil, false) return } + dataLen := f.data.Len() if f.Header().Flags.Has(http2.FlagDataPadded) { - if w := s.fc.onRead(size - uint32(len(f.Data()))); w > 0 { + if w := s.fc.onRead(size - uint32(dataLen)); w > 0 { t.controlBuf.put(&outgoingWindowUpdate{s.id, w}) } } - // TODO(bradfitz, zhaoq): A copy is required here because there is no - // guarantee f.Data() is consumed before the arrival of next frame. - // Can this copy be eliminated? 
- if len(f.Data()) > 0 { - pool := t.bufferPool - if pool == nil { - // Note that this is only supposed to be nil in tests. Otherwise, stream is - // always initialized with a BufferPool. - pool = mem.DefaultBufferPool() - } - s.write(recvMsg{buffer: mem.Copy(f.Data(), pool)}) + if dataLen > 0 { + f.data.Ref() + s.write(recvMsg{buffer: f.data}) } } // The server has closed the stream without sending trailers. Record that @@ -1477,17 +1462,14 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { contentTypeErr = "malformed header: missing HTTP content-type" grpcMessage string recvCompress string - httpStatusCode *int httpStatusErr string - rawStatusCode = codes.Unknown + // the code from the grpc-status header, if present + grpcStatusCode = codes.Unknown // headerError is set if an error is encountered while parsing the headers headerError string + httpStatus string ) - if initialHeader { - httpStatusErr = "malformed header: missing HTTP status" - } - for _, hf := range frame.Fields { switch hf.Name { case "content-type": @@ -1507,36 +1489,11 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) return } - rawStatusCode = codes.Code(uint32(code)) + grpcStatusCode = codes.Code(uint32(code)) case "grpc-message": grpcMessage = decodeGrpcMessage(hf.Value) case ":status": - c, err := strconv.ParseInt(hf.Value, 10, 32) - if err != nil { - se := status.New(codes.Internal, fmt.Sprintf("transport: malformed http-status: %v", err)) - t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) - return - } - statusCode := int(c) - if statusCode >= 100 && statusCode < 200 { - if endStream { - se := status.New(codes.Internal, fmt.Sprintf( - "protocol error: informational header with status code %d must not have END_STREAM set", statusCode)) - t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) - } - return - } - httpStatusCode 
= &statusCode - if statusCode == 200 { - httpStatusErr = "" - break - } - - httpStatusErr = fmt.Sprintf( - "unexpected HTTP status code received from server: %d (%s)", - statusCode, - http.StatusText(statusCode), - ) + httpStatus = hf.Value default: if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) { break @@ -1551,25 +1508,52 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { } } - if !isGRPC || httpStatusErr != "" { - var code = codes.Internal // when header does not include HTTP status, return INTERNAL - - if httpStatusCode != nil { + // If a non-gRPC response is received, then evaluate the HTTP status to + // process the response and close the stream. + // In case http status doesn't provide any error information (status : 200), + // then evalute response code to be Unknown. + if !isGRPC { + var grpcErrorCode = codes.Internal + if httpStatus == "" { + httpStatusErr = "malformed header: missing HTTP status" + } else { + // Parse the status codes (e.g. "200", 404"). + statusCode, err := strconv.Atoi(httpStatus) + if err != nil { + se := status.New(grpcErrorCode, fmt.Sprintf("transport: malformed http-status: %v", err)) + t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) + return + } + if statusCode >= 100 && statusCode < 200 { + if endStream { + se := status.New(codes.Internal, fmt.Sprintf( + "protocol error: informational header with status code %d must not have END_STREAM set", statusCode)) + t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) + } + // In case of informational headers, return. 
+ return + } + httpStatusErr = fmt.Sprintf( + "unexpected HTTP status code received from server: %d (%s)", + statusCode, + http.StatusText(statusCode), + ) var ok bool - code, ok = HTTPStatusConvTab[*httpStatusCode] + grpcErrorCode, ok = HTTPStatusConvTab[statusCode] if !ok { - code = codes.Unknown + grpcErrorCode = codes.Unknown } } var errs []string if httpStatusErr != "" { errs = append(errs, httpStatusErr) } + if contentTypeErr != "" { errs = append(errs, contentTypeErr) } - // Verify the HTTP response is a 200. - se := status.New(code, strings.Join(errs, "; ")) + + se := status.New(grpcErrorCode, strings.Join(errs, "; ")) t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) return } @@ -1600,22 +1584,20 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { } } - for _, sh := range t.statsHandlers { + if t.statsHandler != nil { if !endStream { - inHeader := &stats.InHeader{ + t.statsHandler.HandleRPC(s.ctx, &stats.InHeader{ Client: true, WireLength: int(frame.Header().Length), Header: metadata.MD(mdata).Copy(), Compression: s.recvCompress, - } - sh.HandleRPC(s.ctx, inHeader) + }) } else { - inTrailer := &stats.InTrailer{ + t.statsHandler.HandleRPC(s.ctx, &stats.InTrailer{ Client: true, WireLength: int(frame.Header().Length), Trailer: metadata.MD(mdata).Copy(), - } - sh.HandleRPC(s.ctx, inTrailer) + }) } } @@ -1623,7 +1605,7 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { return } - status := istatus.NewWithProto(rawStatusCode, grpcMessage, mdata[grpcStatusDetailsBinHeader]) + status := istatus.NewWithProto(grpcStatusCode, grpcMessage, mdata[grpcStatusDetailsBinHeader]) // If client received END_STREAM from server while stream was still active, // send RST_STREAM. @@ -1670,7 +1652,7 @@ func (t *http2Client) reader(errCh chan<- error) { // loop to keep reading incoming messages on this transport. 
for { t.controlBuf.throttle() - frame, err := t.framer.fr.ReadFrame() + frame, err := t.framer.readFrame() if t.keepaliveEnabled { atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) } @@ -1685,7 +1667,7 @@ func (t *http2Client) reader(errCh chan<- error) { if s != nil { // use error detail to provide better err message code := http2ErrConvTab[se.Code] - errorDetail := t.framer.fr.ErrorDetail() + errorDetail := t.framer.errorDetail() var msg string if errorDetail != nil { msg = errorDetail.Error() @@ -1703,8 +1685,9 @@ func (t *http2Client) reader(errCh chan<- error) { switch frame := frame.(type) { case *http2.MetaHeadersFrame: t.operateHeaders(frame) - case *http2.DataFrame: + case *parsedDataFrame: t.handleData(frame) + frame.data.Free() case *http2.RSTStreamFrame: t.handleRSTStream(frame) case *http2.SettingsFrame: diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index 83cee314c8f..6f78a6b0c8c 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -35,6 +35,8 @@ import ( "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" + "google.golang.org/protobuf/proto" + "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcutil" @@ -42,7 +44,6 @@ import ( istatus "google.golang.org/grpc/internal/status" "google.golang.org/grpc/internal/syscall" "google.golang.org/grpc/mem" - "google.golang.org/protobuf/proto" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" @@ -86,7 +87,7 @@ type http2Server struct { // updates, reset streams, and various settings) to the controller. controlBuf *controlBuffer fc *trInFlow - stats []stats.Handler + stats stats.Handler // Keepalive and max-age parameters for the server. kp keepalive.ServerParameters // Keepalive enforcement policy. 
@@ -168,7 +169,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, if config.MaxHeaderListSize != nil { maxHeaderListSize = *config.MaxHeaderListSize } - framer := newFramer(conn, writeBufSize, readBufSize, config.SharedWriteBuffer, maxHeaderListSize) + framer := newFramer(conn, writeBufSize, readBufSize, config.SharedWriteBuffer, maxHeaderListSize, config.BufferPool) // Send initial settings as connection preface to client. isettings := []http2.Setting{{ ID: http2.SettingMaxFrameSize, @@ -260,7 +261,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, fc: &trInFlow{limit: uint32(icwz)}, state: reachable, activeStreams: make(map[uint32]*ServerStream), - stats: config.StatsHandlers, + stats: config.StatsHandler, kp: kp, idle: time.Now(), kep: kep, @@ -390,16 +391,15 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade } t.maxStreamID = streamID - buf := newRecvBuffer() s := &ServerStream{ - Stream: &Stream{ - id: streamID, - buf: buf, - fc: &inFlow{limit: uint32(t.initialWindowSize)}, + Stream: Stream{ + id: streamID, + fc: inFlow{limit: uint32(t.initialWindowSize)}, }, st: t, headerWireLength: int(frame.Header().Length), } + s.Stream.buf.init() var ( // if false, content-type was missing or invalid isGRPC = false @@ -640,25 +640,21 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade t.channelz.SocketMetrics.StreamsStarted.Add(1) t.channelz.SocketMetrics.LastRemoteStreamCreatedTimestamp.Store(time.Now().UnixNano()) } - s.requestRead = func(n int) { - t.adjustWindow(s, uint32(n)) - } + s.readRequester = s s.ctxDone = s.ctx.Done() - s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone) - s.trReader = &transportReader{ - reader: &recvBufferReader{ + s.Stream.wq.init(defaultWriteQuota, s.ctxDone) + s.trReader = transportReader{ + reader: recvBufferReader{ ctx: s.ctx, ctxDone: s.ctxDone, - recv: s.buf, - }, - windowHandler: func(n int) { - 
t.updateWindow(s, uint32(n)) + recv: &s.buf, }, + windowHandler: s, } // Register the stream with loopy. t.controlBuf.put(®isterStream{ streamID: s.id, - wq: s.wq, + wq: &s.wq, }) handle(s) return nil @@ -674,7 +670,7 @@ func (t *http2Server) HandleStreams(ctx context.Context, handle func(*ServerStre }() for { t.controlBuf.throttle() - frame, err := t.framer.fr.ReadFrame() + frame, err := t.framer.readFrame() atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) if err != nil { if se, ok := err.(http2.StreamError); ok { @@ -711,8 +707,9 @@ func (t *http2Server) HandleStreams(ctx context.Context, handle func(*ServerStre }) continue } - case *http2.DataFrame: + case *parsedDataFrame: t.handleData(frame) + frame.data.Free() case *http2.RSTStreamFrame: t.handleRSTStream(frame) case *http2.SettingsFrame: @@ -792,7 +789,7 @@ func (t *http2Server) updateFlowControl(n uint32) { } -func (t *http2Server) handleData(f *http2.DataFrame) { +func (t *http2Server) handleData(f *parsedDataFrame) { size := f.Header().Length var sendBDPPing bool if t.bdpEst != nil { @@ -837,22 +834,15 @@ func (t *http2Server) handleData(f *http2.DataFrame) { t.closeStream(s, true, http2.ErrCodeFlowControl, false) return } + dataLen := f.data.Len() if f.Header().Flags.Has(http2.FlagDataPadded) { - if w := s.fc.onRead(size - uint32(len(f.Data()))); w > 0 { + if w := s.fc.onRead(size - uint32(dataLen)); w > 0 { t.controlBuf.put(&outgoingWindowUpdate{s.id, w}) } } - // TODO(bradfitz, zhaoq): A copy is required here because there is no - // guarantee f.Data() is consumed before the arrival of next frame. - // Can this copy be eliminated? - if len(f.Data()) > 0 { - pool := t.bufferPool - if pool == nil { - // Note that this is only supposed to be nil in tests. Otherwise, stream is - // always initialized with a BufferPool. 
- pool = mem.DefaultBufferPool() - } - s.write(recvMsg{buffer: mem.Copy(f.Data(), pool)}) + if dataLen > 0 { + f.data.Ref() + s.write(recvMsg{buffer: f.data}) } } if f.StreamEnded() { @@ -1059,14 +1049,13 @@ func (t *http2Server) writeHeaderLocked(s *ServerStream) error { t.closeStream(s, true, http2.ErrCodeInternal, false) return ErrHeaderListSizeLimitViolation } - for _, sh := range t.stats { + if t.stats != nil { // Note: Headers are compressed with hpack after this call returns. // No WireLength field is set here. - outHeader := &stats.OutHeader{ + t.stats.HandleRPC(s.Context(), &stats.OutHeader{ Header: s.header.Copy(), Compression: s.sendCompress, - } - sh.HandleRPC(s.Context(), outHeader) + }) } return nil } @@ -1134,10 +1123,10 @@ func (t *http2Server) writeStatus(s *ServerStream, st *status.Status) error { // Send a RST_STREAM after the trailers if the client has not already half-closed. rst := s.getState() == streamActive t.finishStream(s, rst, http2.ErrCodeNo, trailingHeader, true) - for _, sh := range t.stats { + if t.stats != nil { // Note: The trailer fields are compressed with hpack after this call returns. // No WireLength field is set here. - sh.HandleRPC(s.Context(), &stats.OutTrailer{ + t.stats.HandleRPC(s.Context(), &stats.OutTrailer{ Trailer: s.trailer.Copy(), }) } @@ -1305,7 +1294,8 @@ func (t *http2Server) Close(err error) { // deleteStream deletes the stream s from transport's active streams. 
func (t *http2Server) deleteStream(s *ServerStream, eosReceived bool) { t.mu.Lock() - if _, ok := t.activeStreams[s.id]; ok { + _, isActive := t.activeStreams[s.id] + if isActive { delete(t.activeStreams, s.id) if len(t.activeStreams) == 0 { t.idle = time.Now() @@ -1313,7 +1303,7 @@ func (t *http2Server) deleteStream(s *ServerStream, eosReceived bool) { } t.mu.Unlock() - if channelz.IsOn() { + if isActive && channelz.IsOn() { if eosReceived { t.channelz.SocketMetrics.StreamsSucceeded.Add(1) } else { diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go index e3663f87f39..6209eb23cdc 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http_util.go +++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -25,7 +25,6 @@ import ( "fmt" "io" "math" - "net" "net/http" "net/url" "strconv" @@ -37,6 +36,7 @@ import ( "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" "google.golang.org/grpc/codes" + "google.golang.org/grpc/mem" ) const ( @@ -300,11 +300,11 @@ type bufWriter struct { buf []byte offset int batchSize int - conn net.Conn + conn io.Writer err error } -func newBufWriter(conn net.Conn, batchSize int, pool *sync.Pool) *bufWriter { +func newBufWriter(conn io.Writer, batchSize int, pool *sync.Pool) *bufWriter { w := &bufWriter{ batchSize: batchSize, conn: conn, @@ -388,15 +388,35 @@ func toIOError(err error) error { return ioError{error: err} } +type parsedDataFrame struct { + http2.FrameHeader + data mem.Buffer +} + +func (df *parsedDataFrame) StreamEnded() bool { + return df.FrameHeader.Flags.Has(http2.FlagDataEndStream) +} + type framer struct { - writer *bufWriter - fr *http2.Framer + writer *bufWriter + fr *http2.Framer + headerBuf []byte // cached slice for framer headers to reduce heap allocs. + reader io.Reader + dataFrame parsedDataFrame // Cached data frame to avoid heap allocations. 
+ pool mem.BufferPool + errDetail error } var writeBufferPoolMap = make(map[int]*sync.Pool) var writeBufferMutex sync.Mutex -func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBuffer bool, maxHeaderListSize uint32) *framer { +func newFramer(conn io.ReadWriter, writeBufferSize, readBufferSize int, sharedWriteBuffer bool, maxHeaderListSize uint32, memPool mem.BufferPool) *framer { + if memPool == nil { + // Note that this is only supposed to be nil in tests. Otherwise, stream + // is always initialized with a BufferPool. + memPool = mem.DefaultBufferPool() + } + if writeBufferSize < 0 { writeBufferSize = 0 } @@ -412,6 +432,8 @@ func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBu f := &framer{ writer: w, fr: http2.NewFramer(w, r), + reader: r, + pool: memPool, } f.fr.SetMaxReadFrameSize(http2MaxFrameLen) // Opt-in to Frame reuse API on framer to reduce garbage. @@ -422,6 +444,146 @@ func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBu return f } +// writeData writes a DATA frame. +// +// It is the caller's responsibility not to violate the maximum frame size. +func (f *framer) writeData(streamID uint32, endStream bool, data [][]byte) error { + var flags http2.Flags + if endStream { + flags = http2.FlagDataEndStream + } + length := uint32(0) + for _, d := range data { + length += uint32(len(d)) + } + // TODO: Replace the header write with the framer API being added in + // https://github.com/golang/go/issues/66655. + f.headerBuf = append(f.headerBuf[:0], + byte(length>>16), + byte(length>>8), + byte(length), + byte(http2.FrameData), + byte(flags), + byte(streamID>>24), + byte(streamID>>16), + byte(streamID>>8), + byte(streamID)) + if _, err := f.writer.Write(f.headerBuf); err != nil { + return err + } + for _, d := range data { + if _, err := f.writer.Write(d); err != nil { + return err + } + } + return nil +} + +// readFrame reads a single frame. 
The returned Frame is only valid +// until the next call to readFrame. +func (f *framer) readFrame() (any, error) { + f.errDetail = nil + fh, err := f.fr.ReadFrameHeader() + if err != nil { + f.errDetail = f.fr.ErrorDetail() + return nil, err + } + // Read the data frame directly from the underlying io.Reader to avoid + // copies. + if fh.Type == http2.FrameData { + err = f.readDataFrame(fh) + return &f.dataFrame, err + } + fr, err := f.fr.ReadFrameForHeader(fh) + if err != nil { + f.errDetail = f.fr.ErrorDetail() + return nil, err + } + return fr, err +} + +// errorDetail returns a more detailed error of the last error +// returned by framer.readFrame. For instance, if readFrame +// returns a StreamError with code PROTOCOL_ERROR, errorDetail +// will say exactly what was invalid. errorDetail is not guaranteed +// to return a non-nil value. +// errorDetail is reset after the next call to readFrame. +func (f *framer) errorDetail() error { + return f.errDetail +} + +func (f *framer) readDataFrame(fh http2.FrameHeader) (err error) { + if fh.StreamID == 0 { + // DATA frames MUST be associated with a stream. If a + // DATA frame is received whose stream identifier + // field is 0x0, the recipient MUST respond with a + // connection error (Section 5.4.1) of type + // PROTOCOL_ERROR. + f.errDetail = errors.New("DATA frame with stream ID 0") + return http2.ConnectionError(http2.ErrCodeProtocol) + } + // Converting a *[]byte to a mem.SliceBuffer incurs a heap allocation. This + // conversion is performed by mem.NewBuffer. To avoid the extra allocation + // a []byte is allocated directly if required and cast to a mem.SliceBuffer. + var buf []byte + // poolHandle is the pointer returned by the buffer pool (if it's used.). 
+ var poolHandle *[]byte + useBufferPool := !mem.IsBelowBufferPoolingThreshold(int(fh.Length)) + if useBufferPool { + poolHandle = f.pool.Get(int(fh.Length)) + buf = *poolHandle + defer func() { + if err != nil { + f.pool.Put(poolHandle) + } + }() + } else { + buf = make([]byte, int(fh.Length)) + } + if fh.Flags.Has(http2.FlagDataPadded) { + if fh.Length == 0 { + return io.ErrUnexpectedEOF + } + // This initial 1-byte read can be inefficient for unbuffered readers, + // but it allows the rest of the payload to be read directly to the + // start of the destination slice. This makes it easy to return the + // original slice back to the buffer pool. + if _, err := io.ReadFull(f.reader, buf[:1]); err != nil { + return err + } + padSize := buf[0] + buf = buf[:len(buf)-1] + if int(padSize) > len(buf) { + // If the length of the padding is greater than the + // length of the frame payload, the recipient MUST + // treat this as a connection error. + // Filed: https://github.com/http2/http2-spec/issues/610 + f.errDetail = errors.New("pad size larger than data payload") + return http2.ConnectionError(http2.ErrCodeProtocol) + } + if _, err := io.ReadFull(f.reader, buf); err != nil { + return err + } + buf = buf[:len(buf)-int(padSize)] + } else if _, err := io.ReadFull(f.reader, buf); err != nil { + return err + } + + f.dataFrame.FrameHeader = fh + if useBufferPool { + // Update the handle to point to the (potentially re-sliced) buf. 
+ *poolHandle = buf + f.dataFrame.data = mem.NewBuffer(poolHandle, f.pool) + } else { + f.dataFrame.data = mem.SliceBuffer(buf) + } + return nil +} + +func (df *parsedDataFrame) Header() http2.FrameHeader { + return df.FrameHeader +} + func getWriteBufferPool(size int) *sync.Pool { writeBufferMutex.Lock() defer writeBufferMutex.Unlock() diff --git a/vendor/google.golang.org/grpc/internal/transport/server_stream.go b/vendor/google.golang.org/grpc/internal/transport/server_stream.go index cf8da0b52d0..ed6a13b7501 100644 --- a/vendor/google.golang.org/grpc/internal/transport/server_stream.go +++ b/vendor/google.golang.org/grpc/internal/transport/server_stream.go @@ -32,7 +32,7 @@ import ( // ServerStream implements streaming functionality for a gRPC server. type ServerStream struct { - *Stream // Embed for common stream functionality. + Stream // Embed for common stream functionality. st internalServerTransport ctxDone <-chan struct{} // closed at the end of stream. Cache of ctx.Done() (for performance) @@ -43,12 +43,13 @@ type ServerStream struct { // Holds compressor names passed in grpc-accept-encoding metadata from the // client. clientAdvertisedCompressors string - headerWireLength int // hdrMu protects outgoing header and trailer metadata. hdrMu sync.Mutex header metadata.MD // the outgoing header metadata. Updated by WriteHeader. headerSent atomic.Bool // atomically set when the headers are sent out. + + headerWireLength int } // Read reads an n byte message from the input stream. 
@@ -178,3 +179,11 @@ func (s *ServerStream) SetTrailer(md metadata.MD) error { s.hdrMu.Unlock() return nil } + +func (s *ServerStream) requestRead(n int) { + s.st.adjustWindow(s, uint32(n)) +} + +func (s *ServerStream) updateWindow(n int) { + s.st.updateWindow(s, uint32(n)) +} diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index 7dd53e80a75..5ff83a7d7d7 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -68,11 +68,11 @@ type recvBuffer struct { err error } -func newRecvBuffer() *recvBuffer { - b := &recvBuffer{ - c: make(chan recvMsg, 1), - } - return b +// init allows a recvBuffer to be initialized in-place, which is useful +// for resetting a buffer or for avoiding a heap allocation when the buffer +// is embedded in another struct. +func (b *recvBuffer) init() { + b.c = make(chan recvMsg, 1) } func (b *recvBuffer) put(r recvMsg) { @@ -123,12 +123,13 @@ func (b *recvBuffer) get() <-chan recvMsg { // recvBufferReader implements io.Reader interface to read the data from // recvBuffer. type recvBufferReader struct { - closeStream func(error) // Closes the client transport stream with the given error and nil trailer metadata. - ctx context.Context - ctxDone <-chan struct{} // cache of ctx.Done() (for performance). - recv *recvBuffer - last mem.Buffer // Stores the remaining data in the previous calls. - err error + _ noCopy + clientStream *ClientStream // The client transport stream is closed with a status representing ctx.Err() and nil trailer metadata. + ctx context.Context + ctxDone <-chan struct{} // cache of ctx.Done() (for performance). + recv *recvBuffer + last mem.Buffer // Stores the remaining data in the previous calls. 
+ err error } func (r *recvBufferReader) ReadMessageHeader(header []byte) (n int, err error) { @@ -139,7 +140,7 @@ func (r *recvBufferReader) ReadMessageHeader(header []byte) (n int, err error) { n, r.last = mem.ReadUnsafe(header, r.last) return n, nil } - if r.closeStream != nil { + if r.clientStream != nil { n, r.err = r.readMessageHeaderClient(header) } else { n, r.err = r.readMessageHeader(header) @@ -164,7 +165,7 @@ func (r *recvBufferReader) Read(n int) (buf mem.Buffer, err error) { } return buf, nil } - if r.closeStream != nil { + if r.clientStream != nil { buf, r.err = r.readClient(n) } else { buf, r.err = r.read(n) @@ -209,7 +210,7 @@ func (r *recvBufferReader) readMessageHeaderClient(header []byte) (n int, err er // TODO: delaying ctx error seems like a unnecessary side effect. What // we really want is to mark the stream as done, and return ctx error // faster. - r.closeStream(ContextErr(r.ctx.Err())) + r.clientStream.Close(ContextErr(r.ctx.Err())) m := <-r.recv.get() return r.readMessageHeaderAdditional(m, header) case m := <-r.recv.get(): @@ -236,7 +237,7 @@ func (r *recvBufferReader) readClient(n int) (buf mem.Buffer, err error) { // TODO: delaying ctx error seems like a unnecessary side effect. What // we really want is to mark the stream as done, and return ctx error // faster. - r.closeStream(ContextErr(r.ctx.Err())) + r.clientStream.Close(ContextErr(r.ctx.Err())) m := <-r.recv.get() return r.readAdditional(m, n) case m := <-r.recv.get(): @@ -285,27 +286,32 @@ const ( // Stream represents an RPC in the transport layer. type Stream struct { - id uint32 ctx context.Context // the associated context of the stream method string // the associated RPC method of the stream recvCompress string sendCompress string - buf *recvBuffer - trReader *transportReader - fc *inFlow - wq *writeQuota - // Callback to state application's intentions to read data. This - // is used to adjust flow control, if needed. 
- requestRead func(int) - - state streamState + readRequester readRequester // contentSubtype is the content-subtype for requests. // this must be lowercase or the behavior is undefined. contentSubtype string trailer metadata.MD // the key-value map of trailer metadata. + + // Non-pointer fields are at the end to optimize GC performance. + state streamState + id uint32 + buf recvBuffer + trReader transportReader + fc inFlow + wq writeQuota +} + +// readRequester is used to state application's intentions to read data. This +// is used to adjust flow control, if needed. +type readRequester interface { + requestRead(int) } func (s *Stream) swapState(st streamState) streamState { @@ -355,7 +361,7 @@ func (s *Stream) ReadMessageHeader(header []byte) (err error) { if er := s.trReader.er; er != nil { return er } - s.requestRead(len(header)) + s.readRequester.requestRead(len(header)) for len(header) != 0 { n, err := s.trReader.ReadMessageHeader(header) header = header[n:] @@ -378,7 +384,7 @@ func (s *Stream) read(n int) (data mem.BufferSlice, err error) { if er := s.trReader.er; er != nil { return nil, er } - s.requestRead(n) + s.readRequester.requestRead(n) for n != 0 { buf, err := s.trReader.Read(n) var bufLen int @@ -401,16 +407,34 @@ func (s *Stream) read(n int) (data mem.BufferSlice, err error) { return data, nil } +// noCopy may be embedded into structs which must not be copied +// after the first use. +// +// See https://golang.org/issues/8005#issuecomment-190753527 +// for details. +type noCopy struct { +} + +func (*noCopy) Lock() {} +func (*noCopy) Unlock() {} + // transportReader reads all the data available for this Stream from the transport and // passes them into the decoder, which converts them into a gRPC message stream. // The error is io.EOF when the stream is done or another non-nil error if // the stream broke. 
type transportReader struct { - reader *recvBufferReader + _ noCopy // The handler to control the window update procedure for both this // particular stream and the associated transport. - windowHandler func(int) + windowHandler windowHandler er error + reader recvBufferReader +} + +// The handler to control the window update procedure for both this +// particular stream and the associated transport. +type windowHandler interface { + updateWindow(int) } func (t *transportReader) ReadMessageHeader(header []byte) (int, error) { @@ -419,7 +443,7 @@ func (t *transportReader) ReadMessageHeader(header []byte) (int, error) { t.er = err return 0, err } - t.windowHandler(n) + t.windowHandler.updateWindow(n) return n, nil } @@ -429,7 +453,7 @@ func (t *transportReader) Read(n int) (mem.Buffer, error) { t.er = err return buf, err } - t.windowHandler(buf.Len()) + t.windowHandler.updateWindow(buf.Len()) return buf, nil } @@ -454,7 +478,7 @@ type ServerConfig struct { ConnectionTimeout time.Duration Credentials credentials.TransportCredentials InTapHandle tap.ServerInHandle - StatsHandlers []stats.Handler + StatsHandler stats.Handler KeepaliveParams keepalive.ServerParameters KeepalivePolicy keepalive.EnforcementPolicy InitialWindowSize int32 @@ -615,6 +639,8 @@ type internalServerTransport interface { write(s *ServerStream, hdr []byte, data mem.BufferSlice, opts *WriteOptions) error writeStatus(s *ServerStream, st *status.Status) error incrMsgRecv() + adjustWindow(s *ServerStream, n uint32) + updateWindow(s *ServerStream, n uint32) } // connectionErrorf creates an ConnectionError with the specified error description. diff --git a/vendor/google.golang.org/grpc/mem/buffer_pool.go b/vendor/google.golang.org/grpc/mem/buffer_pool.go index c37c58c0233..f211e727451 100644 --- a/vendor/google.golang.org/grpc/mem/buffer_pool.go +++ b/vendor/google.golang.org/grpc/mem/buffer_pool.go @@ -32,6 +32,9 @@ type BufferPool interface { Get(length int) *[]byte // Put returns a buffer to the pool. 
+ // + // The provided pointer must hold a prefix of the buffer obtained via + // BufferPool.Get to ensure the buffer's entire capacity can be re-used. Put(*[]byte) } @@ -118,7 +121,11 @@ type sizedBufferPool struct { } func (p *sizedBufferPool) Get(size int) *[]byte { - buf := p.pool.Get().(*[]byte) + buf, ok := p.pool.Get().(*[]byte) + if !ok { + buf := make([]byte, size, p.defaultSize) + return &buf + } b := *buf clear(b[:cap(b)]) *buf = b[:size] @@ -137,12 +144,6 @@ func (p *sizedBufferPool) Put(buf *[]byte) { func newSizedBufferPool(size int) *sizedBufferPool { return &sizedBufferPool{ - pool: sync.Pool{ - New: func() any { - buf := make([]byte, size) - return &buf - }, - }, defaultSize: size, } } @@ -160,6 +161,7 @@ type simpleBufferPool struct { func (p *simpleBufferPool) Get(size int) *[]byte { bs, ok := p.pool.Get().(*[]byte) if ok && cap(*bs) >= size { + clear((*bs)[:cap(*bs)]) *bs = (*bs)[:size] return bs } diff --git a/vendor/google.golang.org/grpc/mem/buffer_slice.go b/vendor/google.golang.org/grpc/mem/buffer_slice.go index af510d20c5a..084fb19c6d1 100644 --- a/vendor/google.golang.org/grpc/mem/buffer_slice.go +++ b/vendor/google.golang.org/grpc/mem/buffer_slice.go @@ -19,6 +19,7 @@ package mem import ( + "fmt" "io" ) @@ -117,43 +118,36 @@ func (s BufferSlice) MaterializeToBuffer(pool BufferPool) Buffer { // Reader returns a new Reader for the input slice after taking references to // each underlying buffer. -func (s BufferSlice) Reader() Reader { +func (s BufferSlice) Reader() *Reader { s.Ref() - return &sliceReader{ + return &Reader{ data: s, len: s.Len(), } } // Reader exposes a BufferSlice's data as an io.Reader, allowing it to interface -// with other parts systems. It also provides an additional convenience method -// Remaining(), which returns the number of unread bytes remaining in the slice. +// with other systems. +// // Buffers will be freed as they are read. 
-type Reader interface { - io.Reader - io.ByteReader - // Close frees the underlying BufferSlice and never returns an error. Subsequent - // calls to Read will return (0, io.EOF). - Close() error - // Remaining returns the number of unread bytes remaining in the slice. - Remaining() int - // Reset frees the currently held buffer slice and starts reading from the - // provided slice. This allows reusing the reader object. - Reset(s BufferSlice) -} - -type sliceReader struct { +// +// A Reader can be constructed from a BufferSlice; alternatively the zero value +// of a Reader may be used after calling Reset on it. +type Reader struct { data BufferSlice len int // The index into data[0].ReadOnlyData(). bufferIdx int } -func (r *sliceReader) Remaining() int { +// Remaining returns the number of unread bytes remaining in the slice. +func (r *Reader) Remaining() int { return r.len } -func (r *sliceReader) Reset(s BufferSlice) { +// Reset frees the currently held buffer slice and starts reading from the +// provided slice. This allows reusing the reader object. +func (r *Reader) Reset(s BufferSlice) { r.data.Free() s.Ref() r.data = s @@ -161,14 +155,16 @@ func (r *sliceReader) Reset(s BufferSlice) { r.bufferIdx = 0 } -func (r *sliceReader) Close() error { +// Close frees the underlying BufferSlice and never returns an error. Subsequent +// calls to Read will return (0, io.EOF). 
+func (r *Reader) Close() error { r.data.Free() r.data = nil r.len = 0 return nil } -func (r *sliceReader) freeFirstBufferIfEmpty() bool { +func (r *Reader) freeFirstBufferIfEmpty() bool { if len(r.data) == 0 || r.bufferIdx != len(r.data[0].ReadOnlyData()) { return false } @@ -179,7 +175,7 @@ func (r *sliceReader) freeFirstBufferIfEmpty() bool { return true } -func (r *sliceReader) Read(buf []byte) (n int, _ error) { +func (r *Reader) Read(buf []byte) (n int, _ error) { if r.len == 0 { return 0, io.EOF } @@ -202,7 +198,8 @@ func (r *sliceReader) Read(buf []byte) (n int, _ error) { return n, nil } -func (r *sliceReader) ReadByte() (byte, error) { +// ReadByte reads a single byte. +func (r *Reader) ReadByte() (byte, error) { if r.len == 0 { return 0, io.EOF } @@ -290,3 +287,59 @@ nextBuffer: } } } + +// Discard skips the next n bytes, returning the number of bytes discarded. +// +// It frees buffers as they are fully consumed. +// +// If Discard skips fewer than n bytes, it also returns an error. +func (r *Reader) Discard(n int) (discarded int, err error) { + total := n + for n > 0 && r.len > 0 { + curData := r.data[0].ReadOnlyData() + curSize := min(n, len(curData)-r.bufferIdx) + n -= curSize + r.len -= curSize + r.bufferIdx += curSize + if r.bufferIdx >= len(curData) { + r.data[0].Free() + r.data = r.data[1:] + r.bufferIdx = 0 + } + } + discarded = total - n + if n > 0 { + return discarded, fmt.Errorf("insufficient bytes in reader") + } + return discarded, nil +} + +// Peek returns the next n bytes without advancing the reader. +// +// Peek appends results to the provided res slice and returns the updated slice. +// This pattern allows re-using the storage of res if it has sufficient +// capacity. +// +// The returned subslices are views into the underlying buffers and are only +// valid until the reader is advanced past the corresponding buffer. +// +// If Peek returns fewer than n bytes, it also returns an error. 
+func (r *Reader) Peek(n int, res [][]byte) ([][]byte, error) { + for i := 0; n > 0 && i < len(r.data); i++ { + curData := r.data[i].ReadOnlyData() + start := 0 + if i == 0 { + start = r.bufferIdx + } + curSize := min(n, len(curData)-start) + if curSize == 0 { + continue + } + res = append(res, curData[start:start+curSize]) + n -= curSize + } + if n > 0 { + return nil, fmt.Errorf("insufficient bytes in reader") + } + return res, nil +} diff --git a/vendor/google.golang.org/grpc/preloader.go b/vendor/google.golang.org/grpc/preloader.go index ee0ff969af4..1e783febf92 100644 --- a/vendor/google.golang.org/grpc/preloader.go +++ b/vendor/google.golang.org/grpc/preloader.go @@ -47,9 +47,6 @@ func (p *PreparedMsg) Encode(s Stream, msg any) error { } // check if the context has the relevant information to prepareMsg - if rpcInfo.preloaderInfo == nil { - return status.Errorf(codes.Internal, "grpc: rpcInfo.preloaderInfo is nil") - } if rpcInfo.preloaderInfo.codec == nil { return status.Errorf(codes.Internal, "grpc: rpcInfo.preloaderInfo.codec is nil") } diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go index 92f52922115..92fdc3afabf 100644 --- a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go @@ -21,7 +21,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.36.6 +// protoc-gen-go v1.36.10 // protoc v5.27.1 // source: grpc/reflection/v1/reflection.proto diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go index 5253e862f0a..c803cf3ba1b 100644 --- a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.6 +// protoc-gen-go v1.36.10 // protoc v5.27.1 // grpc/reflection/v1alpha/reflection.proto is a deprecated file. diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index 47ea09f5c9b..6b04c9e8735 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -657,8 +657,20 @@ type streamReader interface { Read(n int) (mem.BufferSlice, error) } +// noCopy may be embedded into structs which must not be copied +// after the first use. +// +// See https://golang.org/issues/8005#issuecomment-190753527 +// for details. +type noCopy struct { +} + +func (*noCopy) Lock() {} +func (*noCopy) Unlock() {} + // parser reads complete gRPC messages from the underlying reader. type parser struct { + _ noCopy // r is the underlying reader. // See the comment on recvMsg for the permissible // error types. 
@@ -949,7 +961,7 @@ func recv(p *parser, c baseCodec, s recvCompressor, dc Decompressor, m any, maxR // Information about RPC type rpcInfo struct { failfast bool - preloaderInfo *compressorInfo + preloaderInfo compressorInfo } // Information about Preloader @@ -968,7 +980,7 @@ type rpcInfoContextKey struct{} func newContextWithRPCInfo(ctx context.Context, failfast bool, codec baseCodec, cp Compressor, comp encoding.Compressor) context.Context { return context.WithValue(ctx, rpcInfoContextKey{}, &rpcInfo{ failfast: failfast, - preloaderInfo: &compressorInfo{ + preloaderInfo: compressorInfo{ codec: codec, cp: cp, comp: comp, diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index 1da2a542acd..ddd37734119 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -124,7 +124,8 @@ type serviceInfo struct { // Server is a gRPC server to serve RPC requests. type Server struct { - opts serverOptions + opts serverOptions + statsHandler stats.Handler mu sync.Mutex // guards following lis map[net.Listener]bool @@ -692,13 +693,14 @@ func NewServer(opt ...ServerOption) *Server { o.apply(&opts) } s := &Server{ - lis: make(map[net.Listener]bool), - opts: opts, - conns: make(map[string]map[transport.ServerTransport]bool), - services: make(map[string]*serviceInfo), - quit: grpcsync.NewEvent(), - done: grpcsync.NewEvent(), - channelz: channelz.RegisterServer(""), + lis: make(map[net.Listener]bool), + opts: opts, + statsHandler: istats.NewCombinedHandler(opts.statsHandlers...), + conns: make(map[string]map[transport.ServerTransport]bool), + services: make(map[string]*serviceInfo), + quit: grpcsync.NewEvent(), + done: grpcsync.NewEvent(), + channelz: channelz.RegisterServer(""), } chainUnaryServerInterceptors(s) chainStreamServerInterceptors(s) @@ -999,7 +1001,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { ConnectionTimeout: s.opts.connectionTimeout, Credentials: 
s.opts.creds, InTapHandle: s.opts.inTapHandle, - StatsHandlers: s.opts.statsHandlers, + StatsHandler: s.statsHandler, KeepaliveParams: s.opts.keepaliveParams, KeepalivePolicy: s.opts.keepalivePolicy, InitialWindowSize: s.opts.initialWindowSize, @@ -1036,18 +1038,18 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { func (s *Server) serveStreams(ctx context.Context, st transport.ServerTransport, rawConn net.Conn) { ctx = transport.SetConnection(ctx, rawConn) ctx = peer.NewContext(ctx, st.Peer()) - for _, sh := range s.opts.statsHandlers { - ctx = sh.TagConn(ctx, &stats.ConnTagInfo{ + if s.statsHandler != nil { + ctx = s.statsHandler.TagConn(ctx, &stats.ConnTagInfo{ RemoteAddr: st.Peer().Addr, LocalAddr: st.Peer().LocalAddr, }) - sh.HandleConn(ctx, &stats.ConnBegin{}) + s.statsHandler.HandleConn(ctx, &stats.ConnBegin{}) } defer func() { st.Close(errors.New("finished serving streams for the server transport")) - for _, sh := range s.opts.statsHandlers { - sh.HandleConn(ctx, &stats.ConnEnd{}) + if s.statsHandler != nil { + s.statsHandler.HandleConn(ctx, &stats.ConnEnd{}) } }() @@ -1104,7 +1106,7 @@ var _ http.Handler = (*Server)(nil) // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { - st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers, s.opts.bufferPool) + st, err := transport.NewServerHandlerTransport(w, r, s.statsHandler, s.opts.bufferPool) if err != nil { // Errors returned from transport.NewServerHandlerTransport have // already been written to w. @@ -1198,12 +1200,8 @@ func (s *Server) sendResponse(ctx context.Context, stream *transport.ServerStrea return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. 
%d)", payloadLen, s.opts.maxSendMessageSize) } err = stream.Write(hdr, payload, opts) - if err == nil { - if len(s.opts.statsHandlers) != 0 { - for _, sh := range s.opts.statsHandlers { - sh.HandleRPC(ctx, outPayload(false, msg, dataLen, payloadLen, time.Now())) - } - } + if err == nil && s.statsHandler != nil { + s.statsHandler.HandleRPC(ctx, outPayload(false, msg, dataLen, payloadLen, time.Now())) } return err } @@ -1245,16 +1243,15 @@ func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info } func (s *Server) processUnaryRPC(ctx context.Context, stream *transport.ServerStream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) { - shs := s.opts.statsHandlers - if len(shs) != 0 || trInfo != nil || channelz.IsOn() { + sh := s.statsHandler + if sh != nil || trInfo != nil || channelz.IsOn() { if channelz.IsOn() { s.incrCallsStarted() } var statsBegin *stats.Begin - for _, sh := range shs { - beginTime := time.Now() + if sh != nil { statsBegin = &stats.Begin{ - BeginTime: beginTime, + BeginTime: time.Now(), IsClientStream: false, IsServerStream: false, } @@ -1282,7 +1279,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, stream *transport.ServerSt trInfo.tr.Finish() } - for _, sh := range shs { + if sh != nil { end := &stats.End{ BeginTime: statsBegin.BeginTime, EndTime: time.Now(), @@ -1379,7 +1376,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, stream *transport.ServerSt } var payInfo *payloadInfo - if len(shs) != 0 || len(binlogs) != 0 { + if sh != nil || len(binlogs) != 0 { payInfo = &payloadInfo{} defer payInfo.free() } @@ -1405,7 +1402,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, stream *transport.ServerSt return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) } - for _, sh := range shs { + if sh != nil { sh.HandleRPC(ctx, &stats.InPayload{ RecvTime: time.Now(), Payload: v, @@ -1579,33 +1576,30 @@ func (s *Server) processStreamingRPC(ctx context.Context, stream 
*transport.Serv if channelz.IsOn() { s.incrCallsStarted() } - shs := s.opts.statsHandlers + sh := s.statsHandler var statsBegin *stats.Begin - if len(shs) != 0 { - beginTime := time.Now() + if sh != nil { statsBegin = &stats.Begin{ - BeginTime: beginTime, + BeginTime: time.Now(), IsClientStream: sd.ClientStreams, IsServerStream: sd.ServerStreams, } - for _, sh := range shs { - sh.HandleRPC(ctx, statsBegin) - } + sh.HandleRPC(ctx, statsBegin) } ctx = NewContextWithServerTransportStream(ctx, stream) ss := &serverStream{ ctx: ctx, s: stream, - p: &parser{r: stream, bufferPool: s.opts.bufferPool}, + p: parser{r: stream, bufferPool: s.opts.bufferPool}, codec: s.getCodec(stream.ContentSubtype()), desc: sd, maxReceiveMessageSize: s.opts.maxReceiveMessageSize, maxSendMessageSize: s.opts.maxSendMessageSize, trInfo: trInfo, - statsHandler: shs, + statsHandler: sh, } - if len(shs) != 0 || trInfo != nil || channelz.IsOn() { + if sh != nil || trInfo != nil || channelz.IsOn() { // See comment in processUnaryRPC on defers. 
defer func() { if trInfo != nil { @@ -1619,7 +1613,7 @@ func (s *Server) processStreamingRPC(ctx context.Context, stream *transport.Serv ss.mu.Unlock() } - if len(shs) != 0 { + if sh != nil { end := &stats.End{ BeginTime: statsBegin.BeginTime, EndTime: time.Now(), @@ -1627,9 +1621,7 @@ func (s *Server) processStreamingRPC(ctx context.Context, stream *transport.Serv if err != nil && err != io.EOF { end.Error = toRPCErr(err) } - for _, sh := range shs { - sh.HandleRPC(ctx, end) - } + sh.HandleRPC(ctx, end) } if channelz.IsOn() { @@ -1818,19 +1810,17 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Ser method := sm[pos+1:] // FromIncomingContext is expensive: skip if there are no statsHandlers - if len(s.opts.statsHandlers) > 0 { + if s.statsHandler != nil { md, _ := metadata.FromIncomingContext(ctx) - for _, sh := range s.opts.statsHandlers { - ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: stream.Method()}) - sh.HandleRPC(ctx, &stats.InHeader{ - FullMethod: stream.Method(), - RemoteAddr: t.Peer().Addr, - LocalAddr: t.Peer().LocalAddr, - Compression: stream.RecvCompress(), - WireLength: stream.HeaderWireLength(), - Header: md, - }) - } + ctx = s.statsHandler.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: stream.Method()}) + s.statsHandler.HandleRPC(ctx, &stats.InHeader{ + FullMethod: stream.Method(), + RemoteAddr: t.Peer().Addr, + LocalAddr: t.Peer().LocalAddr, + Compression: stream.RecvCompress(), + WireLength: stream.HeaderWireLength(), + Header: md, + }) } // To have calls in stream callouts work. Will delete once all stats handler // calls come from the gRPC layer. 
diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index 0a0af8961f0..ca87ff9776e 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -177,6 +177,8 @@ func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth return cc.NewStream(ctx, desc, method, opts...) } +var emptyMethodConfig = serviceconfig.MethodConfig{} + func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) { // Start tracking the RPC for idleness purposes. This is where a stream is // created for both streaming and unary RPCs, and hence is a good place to @@ -217,7 +219,7 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth return nil, err } - var mc serviceconfig.MethodConfig + mc := &emptyMethodConfig var onCommit func() newStream := func(ctx context.Context, done func()) (iresolver.ClientStream, error) { return newClientStreamWithParams(ctx, desc, cc, method, mc, onCommit, done, nameResolutionDelayed, opts...) 
@@ -240,7 +242,7 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth if rpcConfig.Context != nil { ctx = rpcConfig.Context } - mc = rpcConfig.MethodConfig + mc = &rpcConfig.MethodConfig onCommit = rpcConfig.OnCommitted if rpcConfig.Interceptor != nil { rpcInfo.Context = nil @@ -258,7 +260,7 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth return newStream(ctx, func() {}) } -func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, mc serviceconfig.MethodConfig, onCommit, doneFunc func(), nameResolutionDelayed bool, opts ...CallOption) (_ iresolver.ClientStream, err error) { +func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, mc *serviceconfig.MethodConfig, onCommit, doneFunc func(), nameResolutionDelayed bool, opts ...CallOption) (_ iresolver.ClientStream, err error) { callInfo := defaultCallInfo() if mc.WaitForReady != nil { callInfo.failFast = !*mc.WaitForReady @@ -325,7 +327,7 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client cs := &clientStream{ callHdr: callHdr, ctx: ctx, - methodConfig: &mc, + methodConfig: mc, opts: opts, callInfo: callInfo, cc: cc, @@ -418,19 +420,21 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) ctx := newContextWithRPCInfo(cs.ctx, cs.callInfo.failFast, cs.callInfo.codec, cs.compressorV0, cs.compressorV1) method := cs.callHdr.Method var beginTime time.Time - shs := cs.cc.dopts.copts.StatsHandlers - for _, sh := range shs { - ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: cs.callInfo.failFast, NameResolutionDelay: cs.nameResolutionDelay}) + sh := cs.cc.statsHandler + if sh != nil { beginTime = time.Now() - begin := &stats.Begin{ + ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{ + FullMethodName: method, FailFast: cs.callInfo.failFast, + NameResolutionDelay: cs.nameResolutionDelay, + }) + 
sh.HandleRPC(ctx, &stats.Begin{ Client: true, BeginTime: beginTime, FailFast: cs.callInfo.failFast, IsClientStream: cs.desc.ClientStreams, IsServerStream: cs.desc.ServerStreams, IsTransparentRetryAttempt: isTransparent, - } - sh.HandleRPC(ctx, begin) + }) } var trInfo *traceInfo @@ -461,7 +465,7 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) beginTime: beginTime, cs: cs, decompressorV0: cs.cc.dopts.dc, - statsHandlers: shs, + statsHandler: sh, trInfo: trInfo, }, nil } @@ -482,10 +486,8 @@ func (a *csAttempt) getTransport() error { if a.trInfo != nil { a.trInfo.firstLine.SetRemoteAddr(a.transport.RemoteAddr()) } - if pick.blocked { - for _, sh := range a.statsHandlers { - sh.HandleRPC(a.ctx, &stats.DelayedPickComplete{}) - } + if pick.blocked && a.statsHandler != nil { + a.statsHandler.HandleRPC(a.ctx, &stats.DelayedPickComplete{}) } return nil } @@ -529,7 +531,7 @@ func (a *csAttempt) newStream() error { } a.transportStream = s a.ctx = s.Context() - a.parser = &parser{r: s, bufferPool: a.cs.cc.dopts.copts.BufferPool} + a.parser = parser{r: s, bufferPool: a.cs.cc.dopts.copts.BufferPool} return nil } @@ -601,7 +603,7 @@ type csAttempt struct { cs *clientStream transport transport.ClientTransport transportStream *transport.ClientStream - parser *parser + parser parser pickResult balancer.PickResult finished bool @@ -615,8 +617,8 @@ type csAttempt struct { // and cleared when the finish method is called. 
trInfo *traceInfo - statsHandlers []stats.Handler - beginTime time.Time + statsHandler stats.Handler + beginTime time.Time // set for newStream errors that may be transparently retried allowTransparentRetry bool @@ -1110,17 +1112,15 @@ func (a *csAttempt) sendMsg(m any, hdr []byte, payld mem.BufferSlice, dataLength } return io.EOF } - if len(a.statsHandlers) != 0 { - for _, sh := range a.statsHandlers { - sh.HandleRPC(a.ctx, outPayload(true, m, dataLength, payloadLength, time.Now())) - } + if a.statsHandler != nil { + a.statsHandler.HandleRPC(a.ctx, outPayload(true, m, dataLength, payloadLength, time.Now())) } return nil } func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { cs := a.cs - if len(a.statsHandlers) != 0 && payInfo == nil { + if a.statsHandler != nil && payInfo == nil { payInfo = &payloadInfo{} defer payInfo.free() } @@ -1141,7 +1141,7 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { // Only initialize this state once per stream. a.decompressorSet = true } - if err := recv(a.parser, cs.codec, a.transportStream, a.decompressorV0, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decompressorV1, false); err != nil { + if err := recv(&a.parser, cs.codec, a.transportStream, a.decompressorV0, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decompressorV1, false); err != nil { if err == io.EOF { if statusErr := a.transportStream.Status().Err(); statusErr != nil { return statusErr @@ -1163,8 +1163,8 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { } a.mu.Unlock() } - for _, sh := range a.statsHandlers { - sh.HandleRPC(a.ctx, &stats.InPayload{ + if a.statsHandler != nil { + a.statsHandler.HandleRPC(a.ctx, &stats.InPayload{ Client: true, RecvTime: time.Now(), Payload: m, @@ -1179,7 +1179,7 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { } // Special handling for non-server-stream rpcs. // This recv expects EOF or errors, so we don't collect inPayload. 
- if err := recv(a.parser, cs.codec, a.transportStream, a.decompressorV0, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decompressorV1, false); err == io.EOF { + if err := recv(&a.parser, cs.codec, a.transportStream, a.decompressorV0, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decompressorV1, false); err == io.EOF { return a.transportStream.Status().Err() // non-server streaming Recv returns nil on success } else if err != nil { return toRPCErr(err) @@ -1217,15 +1217,14 @@ func (a *csAttempt) finish(err error) { ServerLoad: balancerload.Parse(tr), }) } - for _, sh := range a.statsHandlers { - end := &stats.End{ + if a.statsHandler != nil { + a.statsHandler.HandleRPC(a.ctx, &stats.End{ Client: true, BeginTime: a.beginTime, EndTime: time.Now(), Trailer: tr, Error: err, - } - sh.HandleRPC(a.ctx, end) + }) } if a.trInfo != nil && a.trInfo.tr != nil { if err == nil { @@ -1331,7 +1330,7 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin return nil, err } as.transportStream = s - as.parser = &parser{r: s, bufferPool: ac.dopts.copts.BufferPool} + as.parser = parser{r: s, bufferPool: ac.dopts.copts.BufferPool} ac.incrCallsStarted() if desc != unaryStreamDesc { // Listen on stream context to cleanup when the stream context is @@ -1374,7 +1373,7 @@ type addrConnStream struct { decompressorSet bool decompressorV0 Decompressor decompressorV1 encoding.Compressor - parser *parser + parser parser // mu guards finished and is held for the entire finish method. mu sync.Mutex @@ -1487,7 +1486,7 @@ func (as *addrConnStream) RecvMsg(m any) (err error) { // Only initialize this state once per stream. 
as.decompressorSet = true } - if err := recv(as.parser, as.codec, as.transportStream, as.decompressorV0, m, *as.callInfo.maxReceiveMessageSize, nil, as.decompressorV1, false); err != nil { + if err := recv(&as.parser, as.codec, as.transportStream, as.decompressorV0, m, *as.callInfo.maxReceiveMessageSize, nil, as.decompressorV1, false); err != nil { if err == io.EOF { if statusErr := as.transportStream.Status().Err(); statusErr != nil { return statusErr @@ -1509,7 +1508,7 @@ func (as *addrConnStream) RecvMsg(m any) (err error) { // Special handling for non-server-stream rpcs. // This recv expects EOF or errors, so we don't collect inPayload. - if err := recv(as.parser, as.codec, as.transportStream, as.decompressorV0, m, *as.callInfo.maxReceiveMessageSize, nil, as.decompressorV1, false); err == io.EOF { + if err := recv(&as.parser, as.codec, as.transportStream, as.decompressorV0, m, *as.callInfo.maxReceiveMessageSize, nil, as.decompressorV1, false); err == io.EOF { return as.transportStream.Status().Err() // non-server streaming Recv returns nil on success } else if err != nil { return toRPCErr(err) @@ -1597,7 +1596,7 @@ type ServerStream interface { type serverStream struct { ctx context.Context s *transport.ServerStream - p *parser + p parser codec baseCodec desc *StreamDesc @@ -1614,7 +1613,7 @@ type serverStream struct { maxSendMessageSize int trInfo *traceInfo - statsHandler []stats.Handler + statsHandler stats.Handler binlogs []binarylog.MethodLogger // serverHeaderBinlogged indicates whether server header has been logged. 
It @@ -1750,10 +1749,8 @@ func (ss *serverStream) SendMsg(m any) (err error) { binlog.Log(ss.ctx, sm) } } - if len(ss.statsHandler) != 0 { - for _, sh := range ss.statsHandler { - sh.HandleRPC(ss.s.Context(), outPayload(false, m, dataLen, payloadLen, time.Now())) - } + if ss.statsHandler != nil { + ss.statsHandler.HandleRPC(ss.s.Context(), outPayload(false, m, dataLen, payloadLen, time.Now())) } return nil } @@ -1784,11 +1781,11 @@ func (ss *serverStream) RecvMsg(m any) (err error) { } }() var payInfo *payloadInfo - if len(ss.statsHandler) != 0 || len(ss.binlogs) != 0 { + if ss.statsHandler != nil || len(ss.binlogs) != 0 { payInfo = &payloadInfo{} defer payInfo.free() } - if err := recv(ss.p, ss.codec, ss.s, ss.decompressorV0, m, ss.maxReceiveMessageSize, payInfo, ss.decompressorV1, true); err != nil { + if err := recv(&ss.p, ss.codec, ss.s, ss.decompressorV0, m, ss.maxReceiveMessageSize, payInfo, ss.decompressorV1, true); err != nil { if err == io.EOF { if len(ss.binlogs) != 0 { chc := &binarylog.ClientHalfClose{} @@ -1808,16 +1805,14 @@ func (ss *serverStream) RecvMsg(m any) (err error) { return toRPCErr(err) } ss.recvFirstMsg = true - if len(ss.statsHandler) != 0 { - for _, sh := range ss.statsHandler { - sh.HandleRPC(ss.s.Context(), &stats.InPayload{ - RecvTime: time.Now(), - Payload: m, - Length: payInfo.uncompressedBytes.Len(), - WireLength: payInfo.compressedLength + headerLen, - CompressedLength: payInfo.compressedLength, - }) - } + if ss.statsHandler != nil { + ss.statsHandler.HandleRPC(ss.s.Context(), &stats.InPayload{ + RecvTime: time.Now(), + Payload: m, + Length: payInfo.uncompressedBytes.Len(), + WireLength: payInfo.compressedLength + headerLen, + CompressedLength: payInfo.compressedLength, + }) } if len(ss.binlogs) != 0 { cm := &binarylog.ClientMessage{ @@ -1834,7 +1829,7 @@ func (ss *serverStream) RecvMsg(m any) (err error) { } // Special handling for non-client-stream rpcs. // This recv expects EOF or errors, so we don't collect inPayload. 
- if err := recv(ss.p, ss.codec, ss.s, ss.decompressorV0, m, ss.maxReceiveMessageSize, nil, ss.decompressorV1, true); err == io.EOF { + if err := recv(&ss.p, ss.codec, ss.s, ss.decompressorV0, m, ss.maxReceiveMessageSize, nil, ss.decompressorV1, true); err == io.EOF { return nil } else if err != nil { return err diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index 76f2e0d060f..9e6d018fb7f 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.76.0" +const Version = "1.77.0" diff --git a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go index 669133d04dc..c96e4483460 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go @@ -32,7 +32,7 @@ var byteType = reflect.TypeOf(byte(0)) func Unmarshal(tag string, goType reflect.Type, evs protoreflect.EnumValueDescriptors) protoreflect.FieldDescriptor { f := new(filedesc.Field) f.L0.ParentFile = filedesc.SurrogateProto2 - f.L1.EditionFeatures = f.L0.ParentFile.L1.EditionFeatures + packed := false for len(tag) > 0 { i := strings.IndexByte(tag, ',') if i < 0 { @@ -108,7 +108,7 @@ func Unmarshal(tag string, goType reflect.Type, evs protoreflect.EnumValueDescri f.L1.StringName.InitJSON(jsonName) } case s == "packed": - f.L1.EditionFeatures.IsPacked = true + packed = true case strings.HasPrefix(s, "def="): // The default tag is special in that everything afterwards is the // default regardless of the presence of commas. @@ -121,6 +121,13 @@ func Unmarshal(tag string, goType reflect.Type, evs protoreflect.EnumValueDescri tag = strings.TrimPrefix(tag[i:], ",") } + // Update EditionFeatures after the loop and after we know whether this is + // a proto2 or proto3 field. 
+ f.L1.EditionFeatures = f.L0.ParentFile.L1.EditionFeatures + if packed { + f.L1.EditionFeatures.IsPacked = true + } + // The generator uses the group message name instead of the field name. // We obtain the real field name by lowercasing the group name. if f.L1.Kind == protoreflect.GroupKind { diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go index 099b2bf451b..9aa7a9bb776 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go @@ -424,27 +424,34 @@ func (d *Decoder) parseFieldName() (tok Token, err error) { return Token{}, d.newSyntaxError("invalid field name: %s", errId(d.in)) } -// parseTypeName parses Any type URL or extension field name. The name is -// enclosed in [ and ] characters. The C++ parser does not handle many legal URL -// strings. This implementation is more liberal and allows for the pattern -// ^[-_a-zA-Z0-9]+([./][-_a-zA-Z0-9]+)*`). Whitespaces and comments are allowed -// in between [ ], '.', '/' and the sub names. +// parseTypeName parses an Any type URL or an extension field name. The name is +// enclosed in [ and ] characters. We allow almost arbitrary type URL prefixes, +// closely following the text-format spec [1,2]. We implement "ExtensionName | +// AnyName" as follows (with some exceptions for backwards compatibility): +// +// char = [-_a-zA-Z0-9] +// url_char = char | [.~!$&'()*+,;=] | "%", hex, hex +// +// Ident = char, { char } +// TypeName = Ident, { ".", Ident } ; +// UrlPrefix = url_char, { url_char | "/" } ; +// ExtensionName = "[", TypeName, "]" ; +// AnyName = "[", UrlPrefix, "/", TypeName, "]" ; +// +// Additionally, we allow arbitrary whitespace and comments between [ and ]. 
+// +// [1] https://protobuf.dev/reference/protobuf/textformat-spec/#characters +// [2] https://protobuf.dev/reference/protobuf/textformat-spec/#field-names func (d *Decoder) parseTypeName() (Token, error) { - startPos := len(d.orig) - len(d.in) // Use alias s to advance first in order to use d.in for error handling. - // Caller already checks for [ as first character. + // Caller already checks for [ as first character (d.in[0] == '['). s := consume(d.in[1:], 0) if len(s) == 0 { return Token{}, ErrUnexpectedEOF } + // Collect everything between [ and ] in name. var name []byte - for len(s) > 0 && isTypeNameChar(s[0]) { - name = append(name, s[0]) - s = s[1:] - } - s = consume(s, 0) - var closed bool for len(s) > 0 && !closed { switch { @@ -452,23 +459,20 @@ func (d *Decoder) parseTypeName() (Token, error) { s = s[1:] closed = true - case s[0] == '/', s[0] == '.': - if len(name) > 0 && (name[len(name)-1] == '/' || name[len(name)-1] == '.') { - return Token{}, d.newSyntaxError("invalid type URL/extension field name: %s", - d.orig[startPos:len(d.orig)-len(s)+1]) - } + case s[0] == '/' || isTypeNameChar(s[0]) || isUrlExtraChar(s[0]): name = append(name, s[0]) - s = s[1:] - s = consume(s, 0) - for len(s) > 0 && isTypeNameChar(s[0]) { - name = append(name, s[0]) - s = s[1:] + s = consume(s[1:], 0) + + // URL percent-encoded chars + case s[0] == '%': + if len(s) < 3 || !isHexChar(s[1]) || !isHexChar(s[2]) { + return Token{}, d.parseTypeNameError(s, 3) } - s = consume(s, 0) + name = append(name, s[0], s[1], s[2]) + s = consume(s[3:], 0) default: - return Token{}, d.newSyntaxError( - "invalid type URL/extension field name: %s", d.orig[startPos:len(d.orig)-len(s)+1]) + return Token{}, d.parseTypeNameError(s, 1) } } @@ -476,15 +480,38 @@ func (d *Decoder) parseTypeName() (Token, error) { return Token{}, ErrUnexpectedEOF } - // First character cannot be '.'. Last character cannot be '.' or '/'. - size := len(name) - if size == 0 || name[0] == '.' || name[size-1] == '.' 
|| name[size-1] == '/' { - return Token{}, d.newSyntaxError("invalid type URL/extension field name: %s", - d.orig[startPos:len(d.orig)-len(s)]) + // Split collected name on last '/' into urlPrefix and typeName (if '/' is + // present). + typeName := name + if i := bytes.LastIndexByte(name, '/'); i != -1 { + urlPrefix := name[:i] + typeName = name[i+1:] + + // urlPrefix may be empty (for backwards compatibility). + // If non-empty, it must not start with '/'. + if len(urlPrefix) > 0 && urlPrefix[0] == '/' { + return Token{}, d.parseTypeNameError(s, 0) + } } + // typeName must not be empty (note: "" splits to [""]) and all identifier + // parts must not be empty. + for _, ident := range bytes.Split(typeName, []byte{'.'}) { + if len(ident) == 0 { + return Token{}, d.parseTypeNameError(s, 0) + } + } + + // typeName must not contain any percent-encoded or special URL chars. + for _, b := range typeName { + if b == '%' || (b != '.' && isUrlExtraChar(b)) { + return Token{}, d.parseTypeNameError(s, 0) + } + } + + startPos := len(d.orig) - len(d.in) + endPos := len(d.orig) - len(s) d.in = s - endPos := len(d.orig) - len(d.in) d.consume(0) return Token{ @@ -496,16 +523,32 @@ func (d *Decoder) parseTypeName() (Token, error) { }, nil } -func isTypeNameChar(b byte) bool { - return (b == '-' || b == '_' || - ('0' <= b && b <= '9') || - ('a' <= b && b <= 'z') || - ('A' <= b && b <= 'Z')) +func (d *Decoder) parseTypeNameError(s []byte, numUnconsumedChars int) error { + return d.newSyntaxError( + "invalid type URL/extension field name: %s", + d.in[:len(d.in)-len(s)+min(numUnconsumedChars, len(s))], + ) } -func isWhiteSpace(b byte) bool { +func isHexChar(b byte) bool { + return ('0' <= b && b <= '9') || + ('a' <= b && b <= 'f') || + ('A' <= b && b <= 'F') +} + +func isTypeNameChar(b byte) bool { + return b == '-' || b == '_' || + ('0' <= b && b <= '9') || + ('a' <= b && b <= 'z') || + ('A' <= b && b <= 'Z') +} + +// isUrlExtraChar complements isTypeNameChar with extra characters 
that we allow +// in URLs but not in type names. Note that '/' is not included so that it can +// be treated specially. +func isUrlExtraChar(b byte) bool { switch b { - case ' ', '\n', '\r', '\t': + case '.', '~', '!', '$', '&', '(', ')', '*', '+', ',', ';', '=': return true default: return false diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go index dbcf90b871f..c775e5832f0 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go @@ -32,6 +32,7 @@ const ( EditionProto3 Edition = 999 Edition2023 Edition = 1000 Edition2024 Edition = 1001 + EditionUnstable Edition = 9999 EditionUnsupported Edition = 100000 ) diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go index dd31faaeb0a..78f02b1b495 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go @@ -330,7 +330,6 @@ func (md *Message) unmarshalFull(b []byte, sb *strs.Builder) { md.L1.Extensions.List[extensionIdx].unmarshalFull(v, sb) extensionIdx++ case genid.DescriptorProto_Options_field_number: - md.unmarshalOptions(v) rawOptions = appendOptions(rawOptions, v) } default: @@ -356,27 +355,6 @@ func (md *Message) unmarshalFull(b []byte, sb *strs.Builder) { md.L2.Options = md.L0.ParentFile.builder.optionsUnmarshaler(&descopts.Message, rawOptions) } -func (md *Message) unmarshalOptions(b []byte) { - for len(b) > 0 { - num, typ, n := protowire.ConsumeTag(b) - b = b[n:] - switch typ { - case protowire.VarintType: - v, m := protowire.ConsumeVarint(b) - b = b[m:] - switch num { - case genid.MessageOptions_MapEntry_field_number: - md.L1.IsMapEntry = protowire.DecodeBool(v) - case genid.MessageOptions_MessageSetWireFormat_field_number: - md.L1.IsMessageSet = 
protowire.DecodeBool(v) - } - default: - m := protowire.ConsumeFieldValue(num, typ, b) - b = b[m:] - } - } -} - func unmarshalMessageReservedRange(b []byte) (r [2]protoreflect.FieldNumber) { for len(b) > 0 { num, typ, n := protowire.ConsumeTag(b) diff --git a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go index 950a6a325a4..65aaf4d210a 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go +++ b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go @@ -26,6 +26,7 @@ const ( Edition_EDITION_PROTO3_enum_value = 999 Edition_EDITION_2023_enum_value = 1000 Edition_EDITION_2024_enum_value = 1001 + Edition_EDITION_UNSTABLE_enum_value = 9999 Edition_EDITION_1_TEST_ONLY_enum_value = 1 Edition_EDITION_2_TEST_ONLY_enum_value = 2 Edition_EDITION_99997_TEST_ONLY_enum_value = 99997 diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go index 229c6980138..4a3bf393ef4 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go @@ -113,6 +113,9 @@ func sizeMap(mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo, opts marshalO } func consumeMap(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi *mapInfo, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if opts.depth--; opts.depth < 0 { + return out, errRecursionDepth + } if wtyp != protowire.BytesType { return out, errUnknown } @@ -170,6 +173,9 @@ func consumeMap(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi *mapInfo } func consumeMapOfMessage(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi *mapInfo, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if opts.depth--; opts.depth < 0 { + return out, errRecursionDepth + } if wtyp != protowire.BytesType { return out, 
errUnknown } diff --git a/vendor/google.golang.org/protobuf/internal/impl/decode.go b/vendor/google.golang.org/protobuf/internal/impl/decode.go index e0dd21fa5f4..1228b5c8c27 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/decode.go +++ b/vendor/google.golang.org/protobuf/internal/impl/decode.go @@ -102,8 +102,7 @@ var errUnknown = errors.New("unknown") func (mi *MessageInfo) unmarshalPointer(b []byte, p pointer, groupTag protowire.Number, opts unmarshalOptions) (out unmarshalOutput, err error) { mi.init() - opts.depth-- - if opts.depth < 0 { + if opts.depth--; opts.depth < 0 { return out, errRecursionDepth } if flags.ProtoLegacy && mi.isMessageSet { diff --git a/vendor/google.golang.org/protobuf/internal/impl/validate.go b/vendor/google.golang.org/protobuf/internal/impl/validate.go index 7b2995dde5e..99a1eb95f7c 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/validate.go +++ b/vendor/google.golang.org/protobuf/internal/impl/validate.go @@ -68,9 +68,13 @@ func Validate(mt protoreflect.MessageType, in protoiface.UnmarshalInput) (out pr if in.Resolver == nil { in.Resolver = protoregistry.GlobalTypes } + if in.Depth == 0 { + in.Depth = protowire.DefaultRecursionLimit + } o, st := mi.validate(in.Buf, 0, unmarshalOptions{ flags: in.Flags, resolver: in.Resolver, + depth: in.Depth, }) if o.initialized { out.Flags |= protoiface.UnmarshalInitialized @@ -257,6 +261,9 @@ func (mi *MessageInfo) validate(b []byte, groupTag protowire.Number, opts unmars states[0].typ = validationTypeGroup states[0].endGroup = groupTag } + if opts.depth--; opts.depth < 0 { + return out, ValidationInvalid + } initialized := true start := len(b) State: @@ -451,6 +458,13 @@ State: mi: vi.mi, tail: b, }) + if vi.typ == validationTypeMessage || + vi.typ == validationTypeGroup || + vi.typ == validationTypeMap { + if opts.depth--; opts.depth < 0 { + return out, ValidationInvalid + } + } b = v continue State case validationTypeRepeatedVarint: @@ -499,6 +513,9 @@ State: mi: 
vi.mi, endGroup: num, }) + if opts.depth--; opts.depth < 0 { + return out, ValidationInvalid + } continue State case flags.ProtoLegacy && vi.typ == validationTypeMessageSetItem: typeid, v, n, err := messageset.ConsumeFieldValue(b, false) @@ -521,6 +538,13 @@ State: mi: xvi.mi, tail: b[n:], }) + if xvi.typ == validationTypeMessage || + xvi.typ == validationTypeGroup || + xvi.typ == validationTypeMap { + if opts.depth--; opts.depth < 0 { + return out, ValidationInvalid + } + } b = v continue State } @@ -547,12 +571,14 @@ State: switch st.typ { case validationTypeMessage, validationTypeGroup: numRequiredFields = int(st.mi.numRequiredFields) + opts.depth++ case validationTypeMap: // If this is a map field with a message value that contains // required fields, require that the value be present. if st.mi != nil && st.mi.numRequiredFields > 0 { numRequiredFields = 1 } + opts.depth++ } // If there are more than 64 required fields, this check will // always fail and we will report that the message is potentially diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go index 77de0f238ce..763fd82841c 100644 --- a/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/vendor/google.golang.org/protobuf/internal/version/version.go @@ -52,7 +52,7 @@ import ( const ( Major = 1 Minor = 36 - Patch = 10 + Patch = 11 PreRelease = "" ) diff --git a/vendor/google.golang.org/protobuf/proto/decode.go b/vendor/google.golang.org/protobuf/proto/decode.go index 4cbf1aeaf79..889d8511d27 100644 --- a/vendor/google.golang.org/protobuf/proto/decode.go +++ b/vendor/google.golang.org/protobuf/proto/decode.go @@ -121,9 +121,8 @@ func (o UnmarshalOptions) unmarshal(b []byte, m protoreflect.Message) (out proto out, err = methods.Unmarshal(in) } else { - o.RecursionLimit-- - if o.RecursionLimit < 0 { - return out, errors.New("exceeded max recursion depth") + if o.RecursionLimit--; o.RecursionLimit < 0 { + 
return out, errRecursionDepth } err = o.unmarshalMessageSlow(b, m) } @@ -220,6 +219,9 @@ func (o UnmarshalOptions) unmarshalSingular(b []byte, wtyp protowire.Type, m pro } func (o UnmarshalOptions) unmarshalMap(b []byte, wtyp protowire.Type, mapv protoreflect.Map, fd protoreflect.FieldDescriptor) (n int, err error) { + if o.RecursionLimit--; o.RecursionLimit < 0 { + return 0, errRecursionDepth + } if wtyp != protowire.BytesType { return 0, errUnknown } @@ -305,3 +307,5 @@ func (o UnmarshalOptions) unmarshalMap(b []byte, wtyp protowire.Type, mapv proto var errUnknown = errors.New("BUG: internal error (unknown)") var errDecode = errors.New("cannot parse invalid wire-format data") + +var errRecursionDepth = errors.New("exceeded maximum recursion depth") diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go index 9196288e4ac..40f17af4e3c 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go @@ -108,7 +108,9 @@ func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (prot if f.L1.Path == "" { return nil, errors.New("file path must be populated") } - if f.L1.Syntax == protoreflect.Editions && (fd.GetEdition() < editionssupport.Minimum || fd.GetEdition() > editionssupport.Maximum) { + if f.L1.Syntax == protoreflect.Editions && + (fd.GetEdition() < editionssupport.Minimum || fd.GetEdition() > editionssupport.Maximum) && + fd.GetEdition() != descriptorpb.Edition_EDITION_UNSTABLE { // Allow cmd/protoc-gen-go/testdata to use any edition for easier // testing of upcoming edition features. 
if !strings.HasPrefix(fd.GetName(), "cmd/protoc-gen-go/testdata/") { @@ -152,6 +154,7 @@ func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (prot imp := &f.L2.Imports[i] imps.importPublic(imp.Imports()) } + optionImps := importSet{f.Path(): true} if len(fd.GetOptionDependency()) > 0 { optionImports := make(filedesc.FileImports, len(fd.GetOptionDependency())) for i, path := range fd.GetOptionDependency() { @@ -165,10 +168,12 @@ func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (prot } imp.FileDescriptor = f - if imps[imp.Path()] { + if imps[imp.Path()] || optionImps[imp.Path()] { return nil, errors.New("already imported %q", path) } - imps[imp.Path()] = true + // This needs to be a separate map so that we don't recognize non-options + // symbols coming from option imports. + optionImps[imp.Path()] = true } f.L2.OptionImports = func() protoreflect.FileImports { return &optionImports diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go index 697a61b290e..147b8c7398d 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go @@ -46,6 +46,8 @@ func toEditionProto(ed filedesc.Edition) descriptorpb.Edition { return descriptorpb.Edition_EDITION_2023 case filedesc.Edition2024: return descriptorpb.Edition_EDITION_2024 + case filedesc.EditionUnstable: + return descriptorpb.Edition_EDITION_UNSTABLE default: panic(fmt.Sprintf("unknown value for edition: %v", ed)) } @@ -58,7 +60,7 @@ func getFeatureSetFor(ed filedesc.Edition) *descriptorpb.FeatureSet { return def } edpb := toEditionProto(ed) - if defaults.GetMinimumEdition() > edpb || defaults.GetMaximumEdition() < edpb { + if (defaults.GetMinimumEdition() > edpb || defaults.GetMaximumEdition() < edpb) && edpb != descriptorpb.Edition_EDITION_UNSTABLE { // This should never happen 
protodesc.(FileOptions).New would fail when // initializing the file descriptor. // This most likely means the embedded defaults were not updated. diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go index 4eacb523c33..0b23faa957c 100644 --- a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go +++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go @@ -69,6 +69,8 @@ const ( // comparison. Edition_EDITION_2023 Edition = 1000 Edition_EDITION_2024 Edition = 1001 + // A placeholder edition for developing and testing unscheduled features. + Edition_EDITION_UNSTABLE Edition = 9999 // Placeholder editions for testing feature resolution. These should not be // used or relied on outside of tests. Edition_EDITION_1_TEST_ONLY Edition = 1 @@ -91,6 +93,7 @@ var ( 999: "EDITION_PROTO3", 1000: "EDITION_2023", 1001: "EDITION_2024", + 9999: "EDITION_UNSTABLE", 1: "EDITION_1_TEST_ONLY", 2: "EDITION_2_TEST_ONLY", 99997: "EDITION_99997_TEST_ONLY", @@ -105,6 +108,7 @@ var ( "EDITION_PROTO3": 999, "EDITION_2023": 1000, "EDITION_2024": 1001, + "EDITION_UNSTABLE": 9999, "EDITION_1_TEST_ONLY": 1, "EDITION_2_TEST_ONLY": 2, "EDITION_99997_TEST_ONLY": 99997, @@ -4793,11 +4797,11 @@ const file_google_protobuf_descriptor_proto_rawDesc = "" + "\x18EnumValueDescriptorProto\x12\x12\n" + "\x04name\x18\x01 \x01(\tR\x04name\x12\x16\n" + "\x06number\x18\x02 \x01(\x05R\x06number\x12;\n" + - "\aoptions\x18\x03 \x01(\v2!.google.protobuf.EnumValueOptionsR\aoptions\"\xa7\x01\n" + + "\aoptions\x18\x03 \x01(\v2!.google.protobuf.EnumValueOptionsR\aoptions\"\xb5\x01\n" + "\x16ServiceDescriptorProto\x12\x12\n" + "\x04name\x18\x01 \x01(\tR\x04name\x12>\n" + "\x06method\x18\x02 \x03(\v2&.google.protobuf.MethodDescriptorProtoR\x06method\x129\n" + - "\aoptions\x18\x03 \x01(\v2\x1f.google.protobuf.ServiceOptionsR\aoptions\"\x89\x02\n" + + "\aoptions\x18\x03 
\x01(\v2\x1f.google.protobuf.ServiceOptionsR\aoptionsJ\x04\b\x04\x10\x05R\x06stream\"\x89\x02\n" + "\x15MethodDescriptorProto\x12\x12\n" + "\x04name\x18\x01 \x01(\tR\x04name\x12\x1d\n" + "\n" + @@ -5033,14 +5037,15 @@ const file_google_protobuf_descriptor_proto_rawDesc = "" + "\bSemantic\x12\b\n" + "\x04NONE\x10\x00\x12\a\n" + "\x03SET\x10\x01\x12\t\n" + - "\x05ALIAS\x10\x02*\xa7\x02\n" + + "\x05ALIAS\x10\x02*\xbe\x02\n" + "\aEdition\x12\x13\n" + "\x0fEDITION_UNKNOWN\x10\x00\x12\x13\n" + "\x0eEDITION_LEGACY\x10\x84\a\x12\x13\n" + "\x0eEDITION_PROTO2\x10\xe6\a\x12\x13\n" + "\x0eEDITION_PROTO3\x10\xe7\a\x12\x11\n" + "\fEDITION_2023\x10\xe8\a\x12\x11\n" + - "\fEDITION_2024\x10\xe9\a\x12\x17\n" + + "\fEDITION_2024\x10\xe9\a\x12\x15\n" + + "\x10EDITION_UNSTABLE\x10\x8fN\x12\x17\n" + "\x13EDITION_1_TEST_ONLY\x10\x01\x12\x17\n" + "\x13EDITION_2_TEST_ONLY\x10\x02\x12\x1d\n" + "\x17EDITION_99997_TEST_ONLY\x10\x9d\x8d\x06\x12\x1d\n" + diff --git a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go index 06d584c14be..484c21fd536 100644 --- a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go @@ -172,13 +172,14 @@ import ( // ) to obtain a formatter capable of generating timestamps in this format. type Timestamp struct { state protoimpl.MessageState `protogen:"open.v1"` - // Represents seconds of UTC time since Unix epoch - // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to - // 9999-12-31T23:59:59Z inclusive. + // Represents seconds of UTC time since Unix epoch 1970-01-01T00:00:00Z. Must + // be between -315576000000 and 315576000000 inclusive (which corresponds to + // 0001-01-01T00:00:00Z to 9999-12-31T23:59:59Z). Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` - // Non-negative fractions of a second at nanosecond resolution. 
Negative - // second values with fractions must still have non-negative nanos values - // that count forward in time. Must be from 0 to 999,999,999 + // Non-negative fractions of a second at nanosecond resolution. This field is + // the nanosecond portion of the duration, not an alternative to seconds. + // Negative second values with fractions must still have non-negative nanos + // values that count forward in time. Must be between 0 and 999,999,999 // inclusive. Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` unknownFields protoimpl.UnknownFields diff --git a/vendor/modules.txt b/vendor/modules.txt index 469e5b9c33c..d55f36779cf 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -11,14 +11,14 @@ filippo.io/edwards25519/field # github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 ## explicit github.com/Azure/go-ntlmssp -# github.com/BurntSushi/toml v1.5.0 +# github.com/BurntSushi/toml v1.6.0 ## explicit; go 1.18 github.com/BurntSushi/toml github.com/BurntSushi/toml/internal # github.com/CiscoM31/godata v1.0.10 ## explicit; go 1.19 github.com/CiscoM31/godata -# github.com/KimMachineGun/automemlimit v0.7.4 +# github.com/KimMachineGun/automemlimit v0.7.5 ## explicit; go 1.22.0 github.com/KimMachineGun/automemlimit/memlimit # github.com/Masterminds/goutils v1.1.1 @@ -88,7 +88,7 @@ github.com/alexedwards/argon2id # github.com/amoghe/go-crypt v0.0.0-20220222110647-20eada5f5964 ## explicit github.com/amoghe/go-crypt -# github.com/antithesishq/antithesis-sdk-go v0.4.3-default-no-op +# github.com/antithesishq/antithesis-sdk-go v0.5.0-default-no-op ## explicit; go 1.20 github.com/antithesishq/antithesis-sdk-go/assert github.com/antithesishq/antithesis-sdk-go/internal @@ -283,7 +283,7 @@ github.com/cenkalti/backoff # github.com/cenkalti/backoff/v5 v5.0.3 ## explicit; go 1.23 github.com/cenkalti/backoff/v5 -# github.com/ceph/go-ceph v0.36.0 +# github.com/ceph/go-ceph v0.37.0 ## explicit; go 1.24.0 
github.com/ceph/go-ceph/cephfs github.com/ceph/go-ceph/cephfs/admin @@ -291,6 +291,7 @@ github.com/ceph/go-ceph/common/admin/manager github.com/ceph/go-ceph/common/commands github.com/ceph/go-ceph/internal/commands github.com/ceph/go-ceph/internal/cutil +github.com/ceph/go-ceph/internal/dlsym github.com/ceph/go-ceph/internal/errutil github.com/ceph/go-ceph/internal/log github.com/ceph/go-ceph/internal/retry @@ -316,14 +317,14 @@ github.com/cloudflare/circl/math/mlsbset github.com/cloudflare/circl/sign github.com/cloudflare/circl/sign/ed25519 github.com/cloudflare/circl/sign/ed448 -# github.com/coreos/go-oidc/v3 v3.16.0 +# github.com/coreos/go-oidc/v3 v3.17.0 ## explicit; go 1.24.0 github.com/coreos/go-oidc/v3/oidc # github.com/coreos/go-semver v0.3.1 ## explicit; go 1.8 github.com/coreos/go-semver/semver -# github.com/coreos/go-systemd/v22 v22.5.0 -## explicit; go 1.12 +# github.com/coreos/go-systemd/v22 v22.6.0 +## explicit; go 1.23 github.com/coreos/go-systemd/v22/journal # github.com/cornelk/hashmap v1.0.8 ## explicit; go 1.19 @@ -434,7 +435,7 @@ github.com/felixge/httpsnoop ## explicit; go 1.17 github.com/fsnotify/fsnotify github.com/fsnotify/fsnotify/internal -# github.com/gabriel-vasile/mimetype v1.4.10 +# github.com/gabriel-vasile/mimetype v1.4.12 ## explicit; go 1.21 github.com/gabriel-vasile/mimetype github.com/gabriel-vasile/mimetype/internal/charset @@ -600,7 +601,7 @@ github.com/go-playground/locales/en # github.com/go-playground/universal-translator v0.18.1 ## explicit; go 1.18 github.com/go-playground/universal-translator -# github.com/go-playground/validator/v10 v10.28.0 +# github.com/go-playground/validator/v10 v10.30.1 ## explicit; go 1.24.0 github.com/go-playground/validator/v10 github.com/go-playground/validator/v10/translations/en @@ -678,8 +679,8 @@ github.com/goccy/go-yaml/parser github.com/goccy/go-yaml/printer github.com/goccy/go-yaml/scanner github.com/goccy/go-yaml/token -# github.com/gofrs/flock v0.12.1 -## explicit; go 1.21.0 +# 
github.com/gofrs/flock v0.13.0 +## explicit; go 1.24.0 github.com/gofrs/flock # github.com/gofrs/uuid v4.4.0+incompatible ## explicit @@ -706,7 +707,7 @@ github.com/golang/protobuf/ptypes/empty # github.com/golang/snappy v0.0.4 ## explicit github.com/golang/snappy -# github.com/gomodule/redigo v1.9.2 +# github.com/gomodule/redigo v1.9.3 ## explicit; go 1.17 github.com/gomodule/redigo/redis # github.com/google/go-cmp v0.7.0 @@ -723,7 +724,7 @@ github.com/google/go-querystring/query # github.com/google/go-tika v0.3.1 ## explicit; go 1.11 github.com/google/go-tika/tika -# github.com/google/go-tpm v0.9.6 +# github.com/google/go-tpm v0.9.7 ## explicit; go 1.22 github.com/google/go-tpm/legacy/tpm2 github.com/google/go-tpm/tpmutil @@ -731,7 +732,7 @@ github.com/google/go-tpm/tpmutil/tbs # github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 ## explicit; go 1.23 github.com/google/pprof/profile -# github.com/google/renameio/v2 v2.0.0 +# github.com/google/renameio/v2 v2.0.1 ## explicit; go 1.13 github.com/google/renameio/v2 # github.com/google/uuid v1.6.0 @@ -851,8 +852,8 @@ github.com/kettek/apng # github.com/kevinburke/ssh_config v1.2.0 ## explicit github.com/kevinburke/ssh_config -# github.com/klauspost/compress v1.18.0 -## explicit; go 1.22 +# github.com/klauspost/compress v1.18.2 +## explicit; go 1.23 github.com/klauspost/compress github.com/klauspost/compress/flate github.com/klauspost/compress/fse @@ -869,6 +870,9 @@ github.com/klauspost/compress/zstd/internal/xxhash # github.com/klauspost/cpuid/v2 v2.2.11 ## explicit; go 1.22 github.com/klauspost/cpuid/v2 +# github.com/klauspost/crc32 v1.3.0 +## explicit; go 1.23.0 +github.com/klauspost/crc32 # github.com/kovidgoyal/go-parallel v1.1.1 ## explicit; go 1.23 github.com/kovidgoyal/go-parallel @@ -1017,7 +1021,7 @@ github.com/mattn/go-isatty # github.com/mattn/go-runewidth v0.0.16 ## explicit; go 1.9 github.com/mattn/go-runewidth -# github.com/mattn/go-sqlite3 v1.14.32 +# github.com/mattn/go-sqlite3 v1.14.33 ## 
explicit; go 1.19 github.com/mattn/go-sqlite3 # github.com/maxymania/go-system v0.0.0-20170110133659-647cc364bf0b @@ -1033,26 +1037,24 @@ github.com/miekg/dns # github.com/mileusna/useragent v1.3.5 ## explicit; go 1.14 github.com/mileusna/useragent -# github.com/minio/crc64nvme v1.0.2 +# github.com/minio/crc64nvme v1.1.0 ## explicit; go 1.22 github.com/minio/crc64nvme -# github.com/minio/highwayhash v1.0.3 +# github.com/minio/highwayhash v1.0.4-0.20251030100505-070ab1a87a76 ## explicit; go 1.15 github.com/minio/highwayhash # github.com/minio/md5-simd v1.1.2 ## explicit; go 1.14 github.com/minio/md5-simd -# github.com/minio/minio-go/v7 v7.0.95 +# github.com/minio/minio-go/v7 v7.0.97 ## explicit; go 1.23.0 github.com/minio/minio-go/v7 -github.com/minio/minio-go/v7/internal/json github.com/minio/minio-go/v7/pkg/cors github.com/minio/minio-go/v7/pkg/credentials github.com/minio/minio-go/v7/pkg/encrypt github.com/minio/minio-go/v7/pkg/kvcache github.com/minio/minio-go/v7/pkg/lifecycle github.com/minio/minio-go/v7/pkg/notification -github.com/minio/minio-go/v7/pkg/peeker github.com/minio/minio-go/v7/pkg/replication github.com/minio/minio-go/v7/pkg/s3utils github.com/minio/minio-go/v7/pkg/set @@ -1092,7 +1094,7 @@ github.com/munnerz/goautoneg # github.com/nats-io/jwt/v2 v2.8.0 ## explicit; go 1.23.0 github.com/nats-io/jwt/v2 -# github.com/nats-io/nats-server/v2 v2.12.1 +# github.com/nats-io/nats-server/v2 v2.12.3 ## explicit; go 1.24.0 github.com/nats-io/nats-server/v2/conf github.com/nats-io/nats-server/v2/internal/fastrand @@ -1110,14 +1112,14 @@ github.com/nats-io/nats-server/v2/server/stree github.com/nats-io/nats-server/v2/server/sysmem github.com/nats-io/nats-server/v2/server/thw github.com/nats-io/nats-server/v2/server/tpm -# github.com/nats-io/nats.go v1.46.1 +# github.com/nats-io/nats.go v1.48.0 ## explicit; go 1.23.0 github.com/nats-io/nats.go github.com/nats-io/nats.go/encoders/builtin github.com/nats-io/nats.go/internal/parser github.com/nats-io/nats.go/util 
-# github.com/nats-io/nkeys v0.4.11 -## explicit; go 1.23.0 +# github.com/nats-io/nkeys v0.4.12 +## explicit; go 1.24.0 github.com/nats-io/nkeys # github.com/nats-io/nuid v1.0.1 ## explicit @@ -1129,8 +1131,8 @@ github.com/nxadm/tail/ratelimiter github.com/nxadm/tail/util github.com/nxadm/tail/watch github.com/nxadm/tail/winfile -# github.com/oklog/run v1.1.0 -## explicit; go 1.13 +# github.com/oklog/run v1.2.0 +## explicit; go 1.20 github.com/oklog/run # github.com/olekukonko/errors v1.1.0 ## explicit; go 1.21 @@ -1177,7 +1179,7 @@ github.com/onsi/ginkgo/reporters/stenographer github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty github.com/onsi/ginkgo/types -# github.com/onsi/ginkgo/v2 v2.27.2 +# github.com/onsi/ginkgo/v2 v2.27.4 ## explicit; go 1.23.0 github.com/onsi/ginkgo/v2 github.com/onsi/ginkgo/v2/config @@ -1201,7 +1203,7 @@ github.com/onsi/ginkgo/v2/internal/reporters github.com/onsi/ginkgo/v2/internal/testingtproxy github.com/onsi/ginkgo/v2/reporters github.com/onsi/ginkgo/v2/types -# github.com/onsi/gomega v1.38.2 +# github.com/onsi/gomega v1.38.3 ## explicit; go 1.23.0 github.com/onsi/gomega github.com/onsi/gomega/format @@ -1311,7 +1313,7 @@ github.com/orcaman/concurrent-map # github.com/owncloud/libre-graph-api-go v1.0.5-0.20251107084958-31937a4ea3f1 ## explicit; go 1.18 github.com/owncloud/libre-graph-api-go -# github.com/owncloud/reva/v2 v2.0.0-20251107154850-a122a9538794 +# github.com/owncloud/reva/v2 v2.0.0-20260116122933-81e6e21256eb ## explicit; go 1.24.0 github.com/owncloud/reva/v2/cmd/revad/internal/grace github.com/owncloud/reva/v2/cmd/revad/runtime @@ -1726,8 +1728,8 @@ github.com/pmezard/go-difflib/difflib ## explicit; go 1.16 github.com/pquerna/cachecontrol github.com/pquerna/cachecontrol/cacheobject -# github.com/prometheus/alertmanager v0.28.1 -## explicit; go 1.22.0 +# github.com/prometheus/alertmanager v0.30.0 +## explicit; go 1.24.0 
github.com/prometheus/alertmanager/asset github.com/prometheus/alertmanager/featurecontrol github.com/prometheus/alertmanager/matcher/compat @@ -1747,8 +1749,8 @@ github.com/prometheus/client_golang/prometheus/promhttp/internal # github.com/prometheus/client_model v0.6.2 ## explicit; go 1.22.0 github.com/prometheus/client_model/go -# github.com/prometheus/common v0.66.1 -## explicit; go 1.23.0 +# github.com/prometheus/common v0.67.4 +## explicit; go 1.24.0 github.com/prometheus/common/expfmt github.com/prometheus/common/helpers/templates github.com/prometheus/common/model @@ -1877,7 +1879,7 @@ github.com/sergi/go-diff/diffmatchpatch # github.com/sethvargo/go-password v0.3.1 ## explicit; go 1.21 github.com/sethvargo/go-password/password -# github.com/shamaton/msgpack/v2 v2.3.1 +# github.com/shamaton/msgpack/v2 v2.4.0 ## explicit; go 1.20 github.com/shamaton/msgpack/v2 github.com/shamaton/msgpack/v2/def @@ -2085,8 +2087,8 @@ go.etcd.io/bbolt go.etcd.io/bbolt/errors go.etcd.io/bbolt/internal/common go.etcd.io/bbolt/internal/freelist -# go.etcd.io/etcd/api/v3 v3.6.5 -## explicit; go 1.24 +# go.etcd.io/etcd/api/v3 v3.6.7 +## explicit; go 1.24.0 go.etcd.io/etcd/api/v3/authpb go.etcd.io/etcd/api/v3/etcdserverpb go.etcd.io/etcd/api/v3/membershippb @@ -2094,8 +2096,8 @@ go.etcd.io/etcd/api/v3/mvccpb go.etcd.io/etcd/api/v3/v3rpc/rpctypes go.etcd.io/etcd/api/v3/version go.etcd.io/etcd/api/v3/versionpb -# go.etcd.io/etcd/client/pkg/v3 v3.6.5 -## explicit; go 1.24 +# go.etcd.io/etcd/client/pkg/v3 v3.6.7 +## explicit; go 1.24.0 go.etcd.io/etcd/client/pkg/v3/fileutil go.etcd.io/etcd/client/pkg/v3/logutil go.etcd.io/etcd/client/pkg/v3/systemd @@ -2103,8 +2105,8 @@ go.etcd.io/etcd/client/pkg/v3/tlsutil go.etcd.io/etcd/client/pkg/v3/transport go.etcd.io/etcd/client/pkg/v3/types go.etcd.io/etcd/client/pkg/v3/verify -# go.etcd.io/etcd/client/v3 v3.6.5 -## explicit; go 1.24 +# go.etcd.io/etcd/client/v3 v3.6.7 +## explicit; go 1.24.0 go.etcd.io/etcd/client/v3 
go.etcd.io/etcd/client/v3/credentials go.etcd.io/etcd/client/v3/internal/endpoint @@ -2129,12 +2131,12 @@ go.opencensus.io/trace go.opencensus.io/trace/internal go.opencensus.io/trace/propagation go.opencensus.io/trace/tracestate -# go.opentelemetry.io/auto/sdk v1.1.0 -## explicit; go 1.22.0 +# go.opentelemetry.io/auto/sdk v1.2.1 +## explicit; go 1.24.0 go.opentelemetry.io/auto/sdk go.opentelemetry.io/auto/sdk/internal/telemetry -# go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0 -## explicit; go 1.23.0 +# go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.64.0 +## explicit; go 1.24.0 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal # go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 @@ -2146,11 +2148,12 @@ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv ## explicit; go 1.23.0 go.opentelemetry.io/contrib/zpages go.opentelemetry.io/contrib/zpages/internal -# go.opentelemetry.io/otel v1.38.0 -## explicit; go 1.23.0 +# go.opentelemetry.io/otel v1.39.0 +## explicit; go 1.24.0 go.opentelemetry.io/otel go.opentelemetry.io/otel/attribute go.opentelemetry.io/otel/attribute/internal +go.opentelemetry.io/otel/attribute/internal/xxhash go.opentelemetry.io/otel/baggage go.opentelemetry.io/otel/codes go.opentelemetry.io/otel/internal/baggage @@ -2160,7 +2163,6 @@ go.opentelemetry.io/otel/semconv/internal go.opentelemetry.io/otel/semconv/v1.10.0 go.opentelemetry.io/otel/semconv/v1.20.0 go.opentelemetry.io/otel/semconv/v1.21.0 -go.opentelemetry.io/otel/semconv/v1.26.0 go.opentelemetry.io/otel/semconv/v1.37.0 go.opentelemetry.io/otel/semconv/v1.37.0/httpconv go.opentelemetry.io/otel/semconv/v1.37.0/otelconv @@ -2184,22 +2186,22 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal 
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry -# go.opentelemetry.io/otel/metric v1.38.0 -## explicit; go 1.23.0 +# go.opentelemetry.io/otel/metric v1.39.0 +## explicit; go 1.24.0 go.opentelemetry.io/otel/metric go.opentelemetry.io/otel/metric/embedded go.opentelemetry.io/otel/metric/noop -# go.opentelemetry.io/otel/sdk v1.38.0 -## explicit; go 1.23.0 +# go.opentelemetry.io/otel/sdk v1.39.0 +## explicit; go 1.24.0 go.opentelemetry.io/otel/sdk go.opentelemetry.io/otel/sdk/instrumentation -go.opentelemetry.io/otel/sdk/internal/env go.opentelemetry.io/otel/sdk/internal/x go.opentelemetry.io/otel/sdk/resource go.opentelemetry.io/otel/sdk/trace -go.opentelemetry.io/otel/sdk/trace/internal/x -# go.opentelemetry.io/otel/trace v1.38.0 -## explicit; go 1.23.0 +go.opentelemetry.io/otel/sdk/trace/internal/env +go.opentelemetry.io/otel/sdk/trace/internal/observ +# go.opentelemetry.io/otel/trace v1.39.0 +## explicit; go 1.24.0 go.opentelemetry.io/otel/trace go.opentelemetry.io/otel/trace/embedded go.opentelemetry.io/otel/trace/internal/telemetry @@ -2225,13 +2227,13 @@ go.uber.org/zap/internal/pool go.uber.org/zap/internal/stacktrace go.uber.org/zap/zapcore go.uber.org/zap/zapgrpc -# go.yaml.in/yaml/v2 v2.4.2 +# go.yaml.in/yaml/v2 v2.4.3 ## explicit; go 1.15 go.yaml.in/yaml/v2 # go.yaml.in/yaml/v3 v3.0.4 ## explicit; go 1.16 go.yaml.in/yaml/v3 -# golang.org/x/crypto v0.45.0 +# golang.org/x/crypto v0.46.0 ## explicit; go 1.24.0 golang.org/x/crypto/argon2 golang.org/x/crypto/bcrypt @@ -2283,7 +2285,7 @@ golang.org/x/image/vector golang.org/x/image/vp8 golang.org/x/image/vp8l golang.org/x/image/webp -# golang.org/x/mod v0.29.0 +# golang.org/x/mod v0.30.0 ## explicit; go 1.24.0 golang.org/x/mod/internal/lazyregexp golang.org/x/mod/module @@ -2316,7 +2318,7 @@ golang.org/x/net/trace ## 
explicit; go 1.24.0 golang.org/x/oauth2 golang.org/x/oauth2/internal -# golang.org/x/sync v0.18.0 +# golang.org/x/sync v0.19.0 ## explicit; go 1.24.0 golang.org/x/sync/errgroup golang.org/x/sync/semaphore @@ -2335,7 +2337,7 @@ golang.org/x/sys/windows/svc/mgr # golang.org/x/term v0.39.0 ## explicit; go 1.24.0 golang.org/x/term -# golang.org/x/text v0.31.0 +# golang.org/x/text v0.32.0 ## explicit; go 1.24.0 golang.org/x/text/cases golang.org/x/text/encoding @@ -2363,7 +2365,7 @@ golang.org/x/text/width # golang.org/x/time v0.14.0 ## explicit; go 1.24.0 golang.org/x/time/rate -# golang.org/x/tools v0.38.0 +# golang.org/x/tools v0.39.0 ## explicit; go 1.24.0 golang.org/x/tools/cover golang.org/x/tools/go/ast/astutil @@ -2393,16 +2395,16 @@ golang.org/x/tools/internal/versions # google.golang.org/genproto v0.0.0-20250303144028-a0af3efb3deb ## explicit; go 1.23.0 google.golang.org/genproto/protobuf/field_mask -# google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 -## explicit; go 1.23.0 +# google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8 +## explicit; go 1.24.0 google.golang.org/genproto/googleapis/api google.golang.org/genproto/googleapis/api/annotations google.golang.org/genproto/googleapis/api/httpbody -# google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 -## explicit; go 1.23.0 +# google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217 +## explicit; go 1.24.0 google.golang.org/genproto/googleapis/rpc/errdetails google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.76.0 +# google.golang.org/grpc v1.77.0 ## explicit; go 1.24.0 google.golang.org/grpc google.golang.org/grpc/attributes @@ -2413,7 +2415,6 @@ google.golang.org/grpc/balancer/endpointsharding google.golang.org/grpc/balancer/grpclb/state google.golang.org/grpc/balancer/pickfirst google.golang.org/grpc/balancer/pickfirst/internal -google.golang.org/grpc/balancer/pickfirst/pickfirstleaf 
google.golang.org/grpc/balancer/roundrobin google.golang.org/grpc/binarylog/grpc_binarylog_v1 google.golang.org/grpc/channelz @@ -2423,6 +2424,7 @@ google.golang.org/grpc/credentials google.golang.org/grpc/credentials/insecure google.golang.org/grpc/encoding google.golang.org/grpc/encoding/gzip +google.golang.org/grpc/encoding/internal google.golang.org/grpc/encoding/proto google.golang.org/grpc/experimental/stats google.golang.org/grpc/grpclog @@ -2472,7 +2474,7 @@ google.golang.org/grpc/serviceconfig google.golang.org/grpc/stats google.golang.org/grpc/status google.golang.org/grpc/tap -# google.golang.org/protobuf v1.36.10 +# google.golang.org/protobuf v1.36.11 ## explicit; go 1.23 google.golang.org/protobuf/encoding/protodelim google.golang.org/protobuf/encoding/protojson